Code Example #1
    def show_daily_digest(self):
        print("============ What I've done today: ============")
        date_today = date.fromtimestamp(time.time())
        os.chdir(self.data_path)
        file_name_list = glob.glob('*.xml')
        final_status_string = ''
        final_diary_string = ''
        for a_file in file_name_list:
            (short_name, extension) = os.path.splitext(a_file)
            job_path = os.path.join(self.data_path, a_file)
            entry = EntryElement()
            entry.read_from_xml(job_path)
            temp_status_string = ''
            temp_comment_string = ''
            status_modified_today = False
            comment_modified_today = False
            for loop in entry.status_change_list:
                if date.fromtimestamp(float(loop['time'])) == date_today:
                    status_modified_today = True
                    temp_status_string += time.strftime('\t%H:%M:%S: ',
                                                        time.localtime(float(loop['time']))) + loop['to'] + '\n'
            if status_modified_today is True:
                final_status_string += 'Spent time on ' + short_name + ':\n' + temp_status_string
            for loop in entry.comment_list:
                if date.fromtimestamp(float(loop['time'])) == date_today:
                    comment_modified_today = True
                    temp_comment_string += time.strftime('\t%H:%M:%S: ', time.localtime(float(loop['time']))) + \
                                           loop['content'] +'\n'
            if comment_modified_today is True:
                final_diary_string += 'Comments about ' + short_name + ':\n' +\
                                      temp_comment_string

        print(final_status_string)
        print("====== What I wanted to remember today: ======")
        print(final_diary_string)
Code Example #2
File: script.py Project: guigeek208/scripts
def parseRSS(directory,afterdate, delta,checklist):
    summary=[]
    d = feedparser.parse(URL+directory+'?fmt=rss&query=after:'+afterdate)
    today = date.today()
    deltadays = date.today() - datetime.timedelta(days=delta)
    
    
    for echeck in checklist:
        found=0
        Boolexpired=False
        for e in d['entries']:
            daterss = e.date_parsed
            fdate = date.fromtimestamp(time.mktime(daterss))     
            if (e.get('title', '').encode('utf-8') == echeck):
                Boolexpired=(today - fdate > datetime.timedelta(days=delta))
                found=1
                break
        if (found == 0 or Boolexpired): 
            boolERR=True     
            if (Boolexpired):
                deltasave = today - fdate
                print (echeck + " from "+str(deltasave))
            if (found == 0):
                listERR.append(echeck+"\n")
                listERR.append("Aucune notification depuis plus de "+str(delta)+" jours\n")
    
    listLOG.append ("\nRésumé des dernières sauvegardes pour "+directory+"\n")
    for e in d['entries']:
        daterss2 = e.date_parsed
        if (not isinstance(daterss2,str)):
            fdate = date.fromtimestamp(time.mktime(daterss2))            
            listLOG.append(str(fdate) + " " + e.get('title', '').encode('utf-8')+"\n")      
Code Example #3
def trends_json(request):
   if request.method == 'GET':
      GET = request.GET
      if GET.has_key('sDate') and GET.has_key('eDate'):
         eStats = EvernoteStatistics(request.user.profile)
         startDate = date.fromtimestamp(float(GET['sDate'])/1000)
         endDate = date.fromtimestamp(float(GET['eDate'])/1000)
         filt = eStats.create_date_filter(startDate, endDate)
         if GET.has_key('tag'):
            filt = eStats.create_guid_filter(GET['tag'],False,filt)    
         if GET.has_key('notebook'):
            filt = eStats.create_guid_filter(GET['notebook'],True,filt)    
         #if the time frame is across multiple years then use months
         formattedTrend = [["Date","Notes"]]
         if (endDate.year - startDate.year) > 1:
            dateTrends = eStats.get_date_trends(True,filt)
            for dt in rrule.rrule(rrule.MONTHLY, dtstart=startDate, 
                                                 until=endDate):
               formattedTrend.append([dt.strftime("%b \'%y"),
                                     dateTrends[dt.strftime("%b \'%y")]])
         else:
            dateTrends = eStats.get_date_trends(False,filt)
            for dt in rrule.rrule(rrule.DAILY, dtstart=startDate, 
                                               until=endDate):
               formattedTrend.append([dt.strftime("%d %b"),
                                     dateTrends[dt.strftime("%d %b")]])
         jsonText = json.dumps({'data': formattedTrend,
                                'title': "Total Notes"})
         return HttpResponse(jsonText,content_type='application/json')
Code Example #4
    def generate_daily_digest_an_entry(self, job_entry: EntryElement):
        entry = job_entry
        temp_status_string = '\t\t<ul>\n'
        temp_comment_string = '\t\t<ul>\n'
        status_modified_today = False
        comment_modified_today = False
        for loop in entry.status_change_list:
            if date.fromtimestamp(float(loop['time'])) == self.date:
                status_modified_today = True
                temp_status_string += '\t\t\t<li>' + time.strftime('`%H:%M:%S`',
                                                                   time.localtime(float(loop['time']))) + loop[
                                          'to'] + '</li>\n'
        temp_status_string += '\t\t</ul>\n'
        if status_modified_today is True:
            self.final_status_string += '\t<li>Spent time on ' + entry.name + ':\n' + temp_status_string + '\t</li>\n'

        for loop in entry.comment_list:
            if date.fromtimestamp(float(loop['time'])) == self.date:
                comment_modified_today = True
                temp_comment_string += '\t\t\t<li>' + time.strftime('`%H:%M:%S`',
                                                                    time.localtime(float(loop['time']))) + \
                                       loop['content'] + '</li>\n'
        temp_comment_string += '\t\t</ul>\n'
        if comment_modified_today is True:
            self.final_comment_string += '\t<li>Story about ' + entry.name + ':\n' + \
                                         temp_comment_string + '\t</li>\n'
Code Example #5
    def get_history(self, account):
        if not account._consultable:
            raise NotImplementedError()

        offset = 0
        next_page = True
        while next_page:
            r = self.open('/transactionnel/services/applications/operations/get/%(number)s/%(nature)s/00/%(currency)s/%(startDate)s/%(endDate)s/%(offset)s/%(limit)s' %
                          {'number': account._number,
                           'nature': account._nature,
                           'currency': account.currency,
                           'startDate': '2000-01-01',
                           'endDate': date.today().strftime('%Y-%m-%d'),
                           'offset': offset,
                           'limit': 50
                          })
            next_page = False
            offset += 50
            for op in r.json()['content']['operations']:
                next_page = True
                t = Transaction()
                t.id = op['id']
                t.amount = Decimal(str(op['montant']))
                t.date = date.fromtimestamp(op.get('dateDebit', op.get('dateOperation'))/1000)
                t.rdate = date.fromtimestamp(op.get('dateOperation', op.get('dateDebit'))/1000)
                t.vdate = date.fromtimestamp(op.get('dateValeur', op.get('dateDebit', op.get('dateOperation')))/1000)
                if 'categorie' in op:
                    t.category = op['categorie']
                t.label = op['libelle']
                t.raw = ' '.join([op['libelle']] + op['details'])
                yield t
Code Example #6
File: browser.py Project: P4ncake/weboob
    def get_history(self, account, coming=False):
        if account.type is Account.TYPE_LOAN or not account._consultable:
            raise NotImplementedError()

        if account._univers != self.current_univers:
            self.move_to_univers(account._univers)

        today = date.today()
        seen = set()
        offset = 0
        next_page = True
        while next_page:
            operation_list = self._make_api_call(
                account=account,
                start_date=date(day=1, month=1, year=2000), end_date=date.today(),
                offset=offset, max_length=50,
            )
            transactions = []
            for op in reversed(operation_list):
                t = Transaction()
                t.id = op['id']
                if op['id'] in seen:
                    raise ParseError('There are several transactions with the same ID, probably an infinite loop')

                seen.add(t.id)
                d = date.fromtimestamp(op.get('dateDebit', op.get('dateOperation'))/1000)
                op['details'] = [re.sub('\s+', ' ', i).replace('\x00', '') for i in op['details'] if i]  # sometimes they put "null" elements...
                label = re.sub('\s+', ' ', op['libelle']).replace('\x00', '')
                raw = ' '.join([label] + op['details'])
                vdate = date.fromtimestamp(op.get('dateValeur', op.get('dateDebit', op.get('dateOperation')))/1000)
                t.parse(d, raw, vdate=vdate)
                t.amount = Decimal(str(op['montant']))
                t.rdate = date.fromtimestamp(op.get('dateOperation', op.get('dateDebit'))/1000)
                if 'categorie' in op:
                    t.category = op['categorie']
                t.label = label
                t._coming = op['intraday']
                if t._coming:
                    # coming transactions have a random uuid id (inconsistent between requests)
                    t.id = ''
                t._coming |= (t.date > today)

                if t.type == Transaction.TYPE_CARD and account.type == Account.TYPE_CARD:
                    t.type = Transaction.TYPE_DEFERRED_CARD

                transactions.append(t)

            # Transactions are unsorted
            for t in sorted_transactions(transactions):
                if coming == t._coming:
                    yield t
                elif coming and not t._coming:
                    # coming transactions are at the top of history
                    self.logger.debug('stopping coming after %s', t)
                    return

            next_page = bool(transactions)
            offset += 50

            assert offset < 30000, 'the site may be doing an infinite loop'
Code Example #7
File: key-report.py Project: NickDaly/key-report
def parse(line):
    """Split PGP lines: returns key ID, status, creation date, and expiration.

    >>> parse("pub:f:4096:1:0000000000000001:0:86400::q:::scESC:")
    ('0000000000000001', 'full', datetime.date(1969, 12, 31), datetime.date(1970, 1, 1))

    """
    key = line.split(":")[4]
    created = date.fromtimestamp(float(line.split(":")[5]))
    try:
        expires = date.fromtimestamp(float(line.split(":")[6]))
    except ValueError:
        expires = date.max

    try:
        status = { "o" : "unknown",
                   "i" : "invalid",
                   "r" : "revoked",
                   "e" : "expired",
                   "-" : "unknown",
                   "q" : "undefined",
                   "n" : "untrusted",
                   "m" : "marginal",
                   "f" : "full",
                   "u" : "ultimate",
            }[line.split(":")[1]]
    except KeyError:
        status = "unknown"

    return (key, status, created, expires)
Code Example #8
File: Mendeley2Moin.py Project: rfunke/Mendeley2Moin
	def fill_template_with_doc(self, doc, template):
		blob = pformat(doc)
		doc['wiki_blob'] = blob
		doc['wiki_author_lastnames'] = join([x['surname'] for x in doc['authors']], ', ')
		doc['wiki_mendeley_createtime'] = date.fromtimestamp(doc['added']).strftime('%Y-%m-%d')
		doc['wiki_mendeley_modtime'] = date.fromtimestamp(doc['modified']).strftime('%Y-%m-%d')
		doc['wiki_category'] = ''
		if not 'notes' in doc:
			doc['notes'] = ''
		if(doc['type']=='Conference Proceedings'):
			doc['wiki_category'] += 'CategoryConference '
		elif(doc['type']=='Journal Article'):
			doc['wiki_category'] += 'CategoryJournal '
		for tag in doc['tags']:
			doc['wiki_category'] += 'CategoryMendeley'+tag.title()+' '
		re_nonlinebreak = re.compile('\\n')
		re_linebreak = re.compile('\<m:linebreak\>\<\/m:linebreak\>')
		re_italic = re.compile('\<([/]?)m:italic\>')
		re_bold = re.compile('\<([/]?)m:bold\>')
		re_underline = re.compile('\<([/]?)m:underline\>')
		re_right = re.compile('\<([/]?)m:right\>')
		re_center = re.compile('\<([/]?)m:center\>')
		doc['wiki_notes'] = re_center.sub('<br>', re_right.sub('<br>', re_underline.sub(r'<\1u>', re_bold.sub(r'<\1b>', \
			re_italic.sub(r'<\1i>',re_linebreak.sub('<br>',re_nonlinebreak.sub('', doc['notes'])))))))
		#<m:bold>123</m:bold>        <i>italic</i>        <m:underline>under</m:underline>        <br>        <m:center>center                </m:center>        <m:right>
		tmpl = Template(template)
		return tmpl.safe_substitute(doc)
Code Example #9
def VIEWLISTOFRECORDEDSHOWS(url,name):
    #Get the list of Recorded shows
    now = time.time()
    strNowObject = date.fromtimestamp(now)
    now = "%02d.%02d.%s" % (strNowObject.day+1, strNowObject.month, strNowObject.year)
    titleObjects = executeSagexAPIJSONCall(url, "Result")
    titles = titleObjects.keys()
    totalEpisodesForAllShows = 0
    totalEpisodesWatchedForAllShows = 0
    for title in titles:
        mfsForTitle = titleObjects.get(title)
        for mfSubset in mfsForTitle:
            strTitle = mfSubset.get("ShowTitle")
            strTitleEncoded = strTitle.encode("utf8")
            strMediaFileID = mfSubset.get("MediaFileID")
            strExternalID = mfSubset.get("ShowExternalID")
            strGenre = mfSubset.get("ShowGenre")
            startTime = float(mfSubset.get("AiringStartTime") // 1000)
            strAiringdateObject = date.fromtimestamp(startTime)
            strAiringdate = "%02d.%02d.%s" % (strAiringdateObject.day, strAiringdateObject.month, strAiringdateObject.year)
            totalEpisodesForShow = mfSubset.get("TotalEpisodes")
            totalEpisodesWatchedForShow = mfSubset.get("TotalWatchedEpisodes")
            totalEpisodesForAllShows = totalEpisodesForAllShows + totalEpisodesForShow
            totalEpisodesWatchedForAllShows = totalEpisodesWatchedForAllShows + totalEpisodesWatchedForShow
            break
        urlToShowEpisodes = strUrl + '/sagex/api?c=xbmc:GetMediaFilesForShowWithSubsetOfProperties&1=' + urllib2.quote(strTitleEncoded) + '&size=500&encoder=json'
        #urlToShowEpisodes = strUrl + '/sagex/api?command=EvaluateExpression&1=FilterByMethod(GetMediaFiles("T"),"GetMediaTitle","' + urllib2.quote(strTitle.encode("utf8")) + '",true)&size=500&encoder=json'
        #urlToShowEpisodes = strUrl + '/sage/Search?searchType=TVFiles&SearchString=' + urllib2.quote(strTitle.encode("utf8")) + '&DVD=on&sort2=airdate_asc&partials=both&TimeRange=0&pagelen=100&sort1=title_asc&filename=&Video=on&search_fields=title&xml=yes'
        print "ADDING strTitleEncoded=" + strTitleEncoded + "; urlToShowEpisodes=" + urlToShowEpisodes
        imageUrl = strUrl + "/sagex/media/poster/" + strMediaFileID
        fanartUrl = strUrl + "/sagex/media/background/" + strMediaFileID
        #print "ADDING imageUrl=" + imageUrl
        addDir(strTitleEncoded, urlToShowEpisodes,11,imageUrl,'',strExternalID,strAiringdate,fanartUrl,totalEpisodesForShow,totalEpisodesWatchedForShow,strGenre)
    addDir('[All Shows]',strUrl + '/sagex/api?c=xbmc:GetMediaFilesForShowWithSubsetOfProperties&1=&size=500&encoder=json',11,IMAGE_POSTER,IMAGE_THUMB,'',now,'',totalEpisodesForAllShows,totalEpisodesWatchedForAllShows,'')
Code Example #10
File: Share.py Project: MaximeCheramy/pyrex
 def close(self, name, buf):
     if name == "share":
         if self.share_type == "file":
             self.share = FileShare(
                 self.data["name"],
                 self.data["client_address"],
                 int(self.data["port"]),
                 self.data["path"],
                 self.data["protocol"],
                 float(self.data["size"]),
                 date.fromtimestamp(int(self.data["last_modified"]) / 1000),
                 self.data["nickname"],
             )
         else:
             self.share = DirectoryShare(
                 self.data["name"],
                 self.data["client_address"],
                 int(self.data["port"]),
                 self.data["path"],
                 self.data["protocol"],
                 date.fromtimestamp(int(self.data["last_modified"]) / 1000),
                 self.data["nickname"],
             )
     else:
         self.data[name] = buf
Code Example #11
File: query_db.py Project: resmio/almighty-gecko
def intercom_companies():
    """ Get all companies from Intercom and return the data as a pandas
        DataFrame.

    """
    Intercom.app_id = get_config('INTERCOM_APP_ID')
    Intercom.app_api_key = get_config('INTERCOM_API_KEY')
    company_list = [FlatDict(c.to_dict) for c in Company.all()
                    if 'Verified' in c.custom_attributes and
                    c.custom_attributes['Verified']]
    companies = []
    for c in company_list:
        dic = {}
        for k, v in c.iteritems():
            kn = k.lower().split(':')[-1].replace(' ', '_')
            dic[kn] = v
        companies.append(dic)
    companies = pd.DataFrame(companies)
    companies = companies.T.drop(
        ['0', '1', 'id', 'widget_integrated', 'app_id',
         'automatic_confirm_bookings', 'minimum_book_in_advance_hours',
         'phone_number', 'monthly_spend']).T
    companies.last_request_at = companies.last_request_at.apply(
        lambda x: date.fromtimestamp(x))
    companies.created_at = companies.created_at.apply(
        lambda x: date.fromtimestamp(x))
    companies.remote_created_at = companies.remote_created_at.apply(
        lambda x: date.fromtimestamp(x))
    companies.updated_at = companies.updated_at.apply(
        lambda x: date.fromtimestamp(x))
    return companies
Code Example #12
File: tweet_sentiment.py Project: kaaquist/pystocks
def _filter_on_date(data, company_name, start, end):
    """
    Filter out all sentiments whose timestamp is not between start and end.
    """
    filtered_data = {}

    start_date = date.fromtimestamp(1)
    try:
        start_date = date.fromtimestamp(start)
    except:
        pass
    
    end_date = date.fromtimestamp(10e10)
    try:
        end_date = date.fromtimestamp(end)
    except:
        pass

    for s_date, sentiment in data.items():
        splits = s_date.split('-')
        year = int(splits[0])
        month = int(splits[1])
        day = int(splits[2])

        date_obj = date(year, month, day)

        if date_obj > end_date:
            continue
        if date_obj < start_date:
            continue

        filtered_data[s_date] = sentiment

    return filtered_data
Code Example #13
File: models.py Project: mareksom/probset
	def check(self):
		err = self.Error()
		if not 0 < len(self.name) <= 100:
			err.name = "Contest name length should be from range [1, 100]."

		if type(self.begin_date) is str:
			try:
				self.begin_date = date.fromtimestamp(time.mktime(time.strptime(self.begin_date, "%d-%m-%Y")))
			except ValueError:
				err.begin_date = 'The date should be given in format "dd-mm-yyyy".'
		elif self.begin_date is None:
			err.begin_date = "You must specify the date of the beginning of the contest."

		if type(self.end_date) is str:
			try:
				self.end_date = date.fromtimestamp(time.mktime(time.strptime(self.end_date, "%d-%m-%Y")))
			except ValueError:
				err.end_date = 'The date should be given in format "dd-mm-yyyy".'
		elif self.end_date is None:
			err.end_date = "You must specify the date of the end of the contest."

		if not err.begin_date and not err.end_date:
			if self.begin_date > self.end_date:
				err.begin_date = "The contest cannot begin after the end of the contest."
				err.end_date = "The contest cannot end before the beginning of the contest."

		if err.is_error():
			raise err
Code Example #14
File: __init__.py Project: k3njiy/ewrt
    def _getDate(element, maxAge=0):
        ''' returns the date of the link '''

        p = re.compile('(\d+)\s(\w+)\sago')

        minDate = (date.fromtimestamp(time.time()) - timedelta(days=maxAge))
        linkDate = None
        for div in element.xpath('.//div'):
            m = p.search(div.text)
            if m:
                age = int(m.groups()[0])
                timeDim = m.groups()[1]

                if re.search('week.?', timeDim):
                    hours = age * 7 * 24
                elif re.search('day.?', timeDim):
                    hours = age * 24
                elif re.search('hour.?', timeDim):
                    hours = age
                elif re.search('minute.?', timeDim):
                    hours = 1
                else:
                    hours = 0

                if hours > 0:
                    linkDate = date.fromtimestamp(time.time()) - timedelta(hours=hours)

                    if linkDate < minDate and maxAge > 0:
                        linkDate = None
            else:
                logger.warning("Could not find a date")

        return linkDate
Code Example #15
File: sms-words.py Project: kevinselwyn/sms-words
def analyze(infile='', outfile='', a='Person A', b='Person B', maximum=100, width=2000, height=1000):
    """Analyze SMSes"""

    root = parse_smses(infile)
    start = get_start_date(root)
    end = get_end_date(root)
    count = get_word_count(root, maximum)

    data_a = {
        'words': count['1']['words'],
        'count': count['1']['count'],
        'name':  a,
        'date':  date.fromtimestamp(start).strftime("%m-%d-%Y")
    }

    data_b = {
        'words': count['2']['words'],
        'count': count['2']['count'],
        'name':  b,
        'date':  date.fromtimestamp(end).strftime("%m-%d-%Y")
    }

    if not outfile:
        outfile = infile.rsplit('.', 1)[0] + '.png'

    draw_graph(filename=outfile, data_a=data_a, data_b=data_b, width=width, height=height)
Code Example #16
File: search.py Project: 3taps/3TapsSearchDB
	def make_index(self, params, tier_info):
		if 'days_back' in params:
			days_back = convert_int('days_back', params.get('days_back'))
			ts_filter = parse_timestamp('timestamp', '%sd..'%days_back)
		elif 'timestamp' in params:
			ts_filter = parse_timestamp('timestamp', params['timestamp'])
		else:
			return self.make_index_list(tier_info['start'],
			                            tier_info['end'],
			                            tier_info['prefix'])
		if 'range' in ts_filter:
			ts_range = ts_filter['range']['timestamp']
			start = ts_range.get('from')
			if start is not None:
				start = max(tier_info['start'], date.fromtimestamp(start))
			else:
				start = tier_info['start']
			end = ts_range.get('to')
			if end is not None:
				end = min(tier_info['end'], date.fromtimestamp(end))
			else:
				end = tier_info['end']
			return self.make_index_list(start, end, tier_info['prefix'])
		else:
			# this shouldn't happen any more...
			raise BadRequest('timestamp', params['timestamp'])
Code Example #17
File: calendar.py Project: markisus/Alternote
 def get(self, class_id):
     if db.classes.check_members(class_id, self.get_current_user()):
         start = self.get_argument("start")
         end = self.get_argument("end")
         #The calendar program asks for start and end as Unix timestamps
         start = date.fromtimestamp(int(start))
         end = date.fromtimestamp(int(end))
         
         start_string = start.isoformat()[:16] #Lop off the seconds data
         end_string = end.isoformat()[:16]
         
         search_results = db.calendar.search_items(start_string, end_string)
         translated = list()
         #Search results is not in the proper format. Translate them
         for result in search_results:
             item = {'id':str(result['_id']), 
                     'title':result.get('title') or result['type'],  
                     'start':result['start'],
                     'end':result['finish'],
                     'url':'/calendar/details/' + str(result['_id']),
                     'className':result['type'],
                     'editable':False}
             translated.append(item)
             
         print(search_results)
         event = [{'id':111, 'title':"Event1", 'start':"2011-11-10", 'url':'http://yahoo.com/'}]
         self.write(json.dumps(translated))
     else:
         self.write("") #Is it better to return 404?
Code Example #18
 def post_to_renren(self):
     date_today = date.fromtimestamp(time.time())
     os.chdir(self.data_path)
     file_name_list = glob.glob('*.xml')
     digest_path = os.path.join(self.digest_path, 'DailyDigest' + str(date_today) + '.md')
     generator = MarkDownGenerator(os.path.expanduser(digest_path),date_today)
     for a_file in file_name_list:
         job_path = os.path.join(self.data_path, a_file)
         entry = EntryElement()
         entry.read_from_xml(job_path)
         status_modified_today = False
         comment_modified_today = False
         for loop in entry.status_change_list:
             if date.fromtimestamp(float(loop['time'])) == date_today:
                 status_modified_today = True
                 break
         for loop in entry.comment_list:
             if date.fromtimestamp(float(loop['time'])) == date_today:
                 comment_modified_today = True
                 break
         if (comment_modified_today is True) | (status_modified_today is True):
             generator.generate_daily_digest_an_entry(entry)
     digest =markdown.markdown(str(generator.generate_daily_digest()))
     renren_handler = Renren()
     renren_handler.setAccount('*****@*****.**', 'aa9-q9d302')
     renren_handler.login()
     renren_handler.post_renren_blog('DailyDigest' + str(date_today), digest)
Code Example #19
File: server.py Project: sunank200/sepal
def preview():
    aoi = _countryGeometry(request.args.get('country'))
    from_date = date.fromtimestamp(int(request.args.get('fromDate')) / 1000.0).isoformat() + 'T00:00'
    to_date = date.fromtimestamp(int(request.args.get('toDate')) / 1000.0).isoformat() + 'T00:00'
    sensors = request.args.get('sensors').split(',')
    bands = request.args.get('bands')
    mosaic = landsat.create_mosaic(
        aoi=aoi,
        sensors=sensors,
        from_date=from_date,
        to_date=to_date,
        target_day_of_year=int(request.args.get('targetDayOfYear')),
        from_day_of_year=int(request.args.get('fromDayOfYear')),
        to_day_of_year=int(request.args.get('toDayOfYear')),
        bands=bands.split(', ')
    )

    mapid = mosaic.getMapId({
        'bands': bands,
        'min': 100,
        'max': 5000,
        'gamma': 1.2
    })
    bounds = aoi.geometry().bounds().getInfo()['coordinates'][0][1:]
    return json.dumps({
        'mapId': mapid['mapid'],
        'token': mapid['token'],
        'bounds': bounds
    })
Code Example #20
File: templateUtils.py Project: tgallacher/TNTFL-web
def formatTime(inTime):
    time = datetime.fromtimestamp(float(inTime))
    dateStr = time.strftime("%Y-%m-%d %H:%M")
    if date.fromtimestamp(float(inTime)) == date.today():
        dateStr = "%02d:%02d" % (time.hour, time.minute)
    elif date.fromtimestamp(float(inTime)) > (date.today() - timedelta(7)):
        dateStr = "%s %02d:%02d" % (("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun")[time.weekday()], time.hour, time.minute)
    return dateStr
Code Example #21
def getSEOUV(days='1',city='全国',type='all'):
    
    global crm
    
    dt = date.fromtimestamp(time.time()-86400)
    EndDate = dt.strftime('%Y/%m/%d')
    
    try:
        dt = date.fromtimestamp(time.time()-int(days)*86400)
    except:
        dt = date.fromtimestamp(time.time()-2*86400)
        
    StartDate = dt.strftime('%Y/%m/%d')

    sourceURL = 'http://stat.corp.ganji.com/CustomBaseReportManagement/CustomBaseReportManager?reportId=20035'
    
    crm.headers = {'Accept':'text/plain, */*; q=0.01',
                'Accept-Encoding':'gzip, deflate',
                'Accept-Language':'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
                'Cache-Control':'no-cache',
                'Connection':'keep-alive',
                'Content-Length':'204',
                'Content-Type':'application/x-www-form-urlencoded; charset=UTF-8',
                'Cookie':'gj_inner_acc=1-108056; __utma=32156897.369321906.1427440588.1427444388.1427444388.1; ganji_uuid=8458223866562113922380-140304361; ganji_xuuid=832ad534-b0c4-4ea3-a8c6-ef4ca9ddbd22; citydomain=bj; tuiguang_user=1; vip_version=new; cityDomain=bj; __utmganji_v20110909=0x67309c4035c9c7202856540eabd8a28; __utmz=32156897.1427444388.1.1.utmcsr=bj.ganji.com|utmccn=(referral)|utmcmd=referral|utmcct=/fang5/a5k897/; lg=1; fangvip_free_refresh_hint=true; NTKF_T2D_CLIENTID=guestE8AF95A6-2B8C-BA3C-F4DE-12FC51C8752A; wap_banner_list=last; __utmc=32156897; _gl_tracker=%7B%22ca_source%22%3A%22www.baidu.com%22%2C%22ca_name%22%3A%22se%22%2C%22ca_kw%22%3A%22%25E8%25B5%25B6%25E9%259B%2586%25E7%25BD%2591%7Cutf8%22%2C%22ca_id%22%3A%22-%22%2C%22ca_s%22%3A%22seo_baidu%22%2C%22ca_n%22%3A%22-%22%2C%22ca_i%22%3A%22-%22%2C%22sid%22%3A37053532593%2C%22kw%22%3A%2213826568520%22%7D; STA_DS=3; xiaoqubaoFlag=1; NTKF_PAGE_MANAGE=%7B%22m%22%3A%5B%5D%2C%22t%22%3A%2211%3A21%3A04%22%7D; NTKF_CACHE_DATA=%7B%22uid%22%3Anull%2C%22tid%22%3Anull%2C%22fid%22%3A%221425283616175969%22%2C%22d%22%3A%7B%7D%7D; GDNETSSOC=userm=VyTZ2dihqf/Vp+P4mGdFTl6PiWLhKpOnpZaenF5b63zH6WUI+m4usS3u0Zn6XDUD4I+uZbK8KwZe1+EyMjQzO85c3mxixVklfeRYJLcHJOVaciO3TbJg1CRFyUDsiJMSPLWF3TYqaj4=; nTalk_CACHE_DATA={uid:kf_10111_ISME9754_82550546,tid:1426750327772760}; _ga=GA1.2.369321906.1427440588; mobversionbeta=2.0; GANJISESSID=98e19986deda44b78db3f43d2d8e4333',
                'Host':'stat.corp.ganji.com',
                'Pragma':'no-cache',
                'Referer':'http://stat.corp.ganji.com/CustomBaseReportManagement/CustomBaseReportManager?reportId=20035',
                'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:36.0) Gecko/20100101 Firefox/36.0',
                'X-Requested-With':'XMLHttpRequest'}
    
    #crm.headers = {'X-Requested-With':'XMLHttpRequest','Content-Type':'application/x-www-form-urlencoded; charset=UTF-8'}
    
    citys = [{"Text":"全国","Value":"0"}]
    
    for c in citys:
        if city == c['Text']:
            city = c['Value']
    
    
    datas = {
             'page':'1',
             'para_filter':'"day_dt_stat_date":"'+StartDate+'","day_dt_end_date":"'+EndDate+'","city_key":"0","category_key":"7","source":"seo"',
             'size':'20'
                     }
    #print datas
    
    dataR = crm.post(sourceURL,datas,'')
    
    #print crm.url,crm.headers
    dataR = json.loads(dataR)  # decode the JSON response before iterating it
    tmpDic = {}
    for data in dataR['Data']:
        #print data
        #print data["s2"].encode('utf-8')
        tmpDic[data["s2"].encode('utf-8')] = {'uv':data["y5"]}
    return tmpDic
Code Example #22
File: projects_tags.py Project: cgbill/ScrumDo
def google_chart_url(iteration_or_project):
    """Returns a URL for either a project or an iteration that corresponds to the burn up chart generated by google charts.
       The url will be to an image format. If no chart can be drawn, a 1x1 image is returned.  This should be used for quick
       summary charts, not detailed pretty ones.  We only use it in emails right now.  """
    try:
        total_points = []
        claimed_points = []
        max_val = 0
        claimed_dates = []
        claimed_values = []
        total_dates = []
        total_values = []        

        # Chart Size...
        if hasattr(iteration_or_project,"slug"):
            size = "550x120"
            # Project charts are bigger than iteration charts.
        else:
            size = "550x80"
        
        # Gather up all the points_log entries.
        for log in iteration_or_project.points_log.all():
            total_points.append( [log.timestamp(), log.points_total] )
            claimed_points.append( [log.timestamp(), log.points_claimed] )
            if log.points_total > max_val: 
                max_val = log.points_total
    
        # If we don't have enough points to draw a chart, bail.
        if len(total_points) <= 1:
            return "cht=lxy&chs=1x1"
        
        # Remove redundant data in chart data.
        total_points = reduce_burndown_data(total_points)
        claimed_points = reduce_burndown_data(claimed_points)

        # Some helper values.
        start_date = total_points[0][0]
        end_date = total_points[-1][0]
        start_date_s = date.fromtimestamp( start_date/1000 ).strftime('%Y-%m-%d')
        end_date_s = date.fromtimestamp( end_date/1000 ).strftime('%Y-%m-%d')
        date_range = end_date - start_date

        # Create the entries for the total points series.
        for piece in total_points:
            total_dates.append( _googleChartValue(piece[0], start_date, end_date) )
            total_values.append( _googleChartValue( piece[1] ,0, max_val) )
        
        # Create the entries for the claimed points series.
        for piece in claimed_points:
            claimed_dates.append( _googleChartValue(piece[0], start_date, end_date) )
            claimed_values.append( _googleChartValue( piece[1] ,0, max_val) )
        
        # Put it all together in google chart format.  (Docs: http://code.google.com/apis/chart/)
        data = "http://chart.googleapis.com/chart?chxr=0,0,%d&cht=lxy&chs=%s&chd=s:%s,%s,%s,%s&chxt=y,x&chxs=0,444444,8,0,lt&chxl=1:|%s|%s&chco=9ED147,197AFF&chm=B,7EAEE3,1,0,0|B,99CBB0,0,0,0" % ( max_val,size,"".join(claimed_dates), "".join(claimed_values), "".join(total_dates), "".join(total_values), start_date_s, end_date_s )
        #logger.debug(data)
        return data
    except:
        return "cht=lxy&chs=1x1"
Code Example #23
 def _get_case_dates(self):
     case_dates = []
     for e in self.html.xpath('//table[3]//tr[position() > 1]/td[3]//font'):
         s = html.tostring (e, method='text', encoding='unicode').strip()
         if s == '08//02/2001':
             case_dates.append(date.fromtimestamp(time.mktime(time.strptime('08/02/2001', '%m/%d/%Y'))))
         else:
             case_dates.append(date.fromtimestamp(time.mktime(time.strptime(s.strip(), '%m/%d/%Y'))))
     return case_dates
Code Example #24
    def keyinfos(self):
        global uid_string
        selecteditem = self.ui.listWidget.currentItem().text()
        uid = selecteditem
        email = uid.split('<')
        name = email[0]
        email = email[-1]
        email = email[:-1]

        for key in keylist:
            for uid_string in key['uids']:
                uid_strings.append(uid_string)
                uid_string2key[uid_string] = key
        key = uid_string2key[uid]
        self.ui.label_empreinte.setText(key['fingerprint'])
        self.ui.label_identifiant.setText(key['keyid'])
        self.ui.label_taille.setText(key['length'])
        self.ui.label_confiance.setText(key['ownertrust'])
        self.ui.label_nom.setText(name)
        if key['ownertrust'] == 'q':
            self.ui.label_confiance.setText('Je ne sais pas')
        elif key['ownertrust'] == 'u':
            self.ui.label_confiance.setText('Ultime')
        elif key['ownertrust'] == 'n':
            self.ui.label_confiance.setText('Jamais')
        elif key['ownertrust'] == 'm':
            self.ui.label_confiance.setText('Marginale')
        elif key['ownertrust'] == 'f':
            self.ui.label_confiance.setText('Complete')
        else:
            self.ui.label_confiance.setText(key['ownertrust'])
        date_created = int(key['date'])
        date_created = date.fromtimestamp(date_created)
        self.ui.label_creation.setText(str(date_created))
        if key["algo"] in ["1", "2", "3"]:
            self.ui.label_algorithme.setText("RSA")
        elif key["algo"] in ["16", "20"]:
            self.ui.label_algorithme.setText("Elgamal")
        elif key["algo"] in ["17"]:
            self.ui.label_algorithme.setText("DSA")
        elif key["algo"] in ["18"]:
            self.ui.label_algorithme.setText("ECDH")
        elif key["algo"] in ["10"]:
            self.ui.label_algorithme.setText("ECDSA")
        elif key["algo"] in ["19"]:
            self.ui.label_algorithme.setText("ECCDSA")
        elif key["algo"] in ["22"]:
            self.ui.label_algorithme.setText("ECC ed25519")
        else:
            self.ui.label_algorithme.setText("algo=" + key["algo"])
        if (key['expires']) != "":
            date_expire = int(key['expires'])
            date_expire = date.fromtimestamp(date_expire)
            self.ui.dateEdit.setDate(date_expire)
            self.ui.label_expiration.setText(str(date_expire))
        else:
            self.ui.label_expiration.setText("Pas d'expiration")
Code Example #25
File: auth_tasks.py Project: HumanDynamics/openPDS
def computeFingerprint(ids):
    numBlocks = 8
    blockLength = (24 / numBlocks) * 3600
    blockOverlap = 1800
    baselineStart = getStartTime(90, True)
    # To simplify things, make baselineStart on a Monday
    baselineStart = baselineStart - date.fromtimestamp(baselineStart).weekday() * 24 * 3600
    start = getStartTime(1, False)
    baselineAPs = ids.getData("WifiProbe", baselineStart, start) or []
    baselineAPs = [(ap["time"], ap["value"]["bssid"], ap["value"]["level"], ap["value"]["ssid"]) for ap in baselineAPs]
    baselineAPs = topNFromSamples(baselineAPs, 5)
    baselineAPsByBlock = { day: { block: [] for block in range(0, numBlocks) } for day in range(0,7) }

    for ap in baselineAPs:
        baselineAPsByBlock[date.fromtimestamp(ap[0]).weekday()][timeToBlock(ap[0], numBlocks, blockLength)].append(ap[1])

    baselineAPsByBlock = { day: { block: [ap[0] for ap in Counter(baselineAPsByBlock[day][block]).most_common(10)] for block in range(0, numBlocks)} for day in range(0, 7)}
    pp = pprint.PrettyPrinter(indent=4)
    #pp.pprint(baselineAPsByBlock)
    testAPs = ids.getData("WifiProbe", start, None)

    testAPs = [(ap["time"], ap["value"]["bssid"], ap["value"]["level"]) for ap in testAPs]
    testAPs = topNFromSamples(testAPs, 5)
#    print clusterByTime(testAPs)
#    print topNFromSample(testAPs, 2)
    
    testAPsByBlock = {}
    for ap in testAPs:
        weekday = date.fromtimestamp(ap[0]).weekday()
        block = timeToBlock(ap[0], numBlocks, blockLength)
        if weekday not in testAPsByBlock:
            testAPsByBlock[weekday] = { block: [ap[1]] }
        elif block not in testAPsByBlock[weekday]:
            testAPsByBlock[weekday][block] = [ap[1]]
        else:
            testAPsByBlock[weekday][block].append(ap[1])
    
    testAPsByBlock = { day: { block: [ap[0] for ap in Counter(testAPsByBlock[day][block]).most_common(10)] for block in testAPsByBlock[day]} for day in testAPsByBlock}
    
    baseline= {}
    similarity = {}
    for day in testAPsByBlock:
        baseline[day] = {}
        similarity[day] = {}
        for block in testAPsByBlock[day]:
#            print "Day: %s, Block %s"%(day, block)
            base = set(baselineAPsByBlock[day][block])
            test = set(testAPsByBlock[day][block])
            baseline[day][block] = base
#            pp.pprint(base)
#            pp.pprint(test)
            overlap = len(base.union(test))
            jaccard = float(len(base.intersection(test))) / overlap if overlap > 0 else 0
            similarity[day][block] = jaccard
#            print "Jaccard: %s"%jaccard

    ids.saveAnswer("AuthFingerprint", { "base": stringKeys(baseline), "test": stringKeys(testAPsByBlock), "similarity": stringKeys(similarity)})
Code Example #26
File: utils.py Project: FashtimeDotCom/xapian_weibo
def ts_range2date_strs(begin_ts, end_ts):
    begin_date = date.fromtimestamp(begin_ts)
    end_date = date.fromtimestamp(end_ts)
    date_strs = []
    while (begin_date <= end_date):
        date_strs.append(begin_date.strftime("%Y%m%d"))
        begin_date += timedelta(days=1)

    return date_strs
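
A brief usage sketch for the helper above (hypothetical values; it assumes ts_range2date_strs is in scope and that the local timezone is UTC, since date.fromtimestamp converts to local time):

day = 24 * 60 * 60
print(ts_range2date_strs(0, 2 * day))  # ['19700101', '19700102', '19700103'] under the UTC assumption
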
Code Example #27
def readable_timestamp(stamp):
    """
    :param stamp: A floating point number representing the POSIX file timestamp.
    :return: A short human-readable string representation of the timestamp.
    """
    if date.fromtimestamp(stamp) == date.today():
        return str(datetime.fromtimestamp(stamp).strftime("%I:%M:%S%p"))
    else:
        return str(date.fromtimestamp(stamp))
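
A hedged usage sketch of the branch above, assuming readable_timestamp is in scope: a timestamp from the current day collapses to a clock time, anything older falls back to the plain ISO date (the printed values are illustrative only):

import time

print(readable_timestamp(time.time()))              # e.g. '09:15:42AM'
print(readable_timestamp(time.time() - 3 * 86400))  # e.g. '2024-05-01'
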
Code Example #28
    def expand_macro(self, formatter, name, arg_content):

        cursor = self.env.get_db_cnx().cursor()

        query = "SELECT name, due, completed, description from milestone order by due;"
        cursor.execute(query)

        miles = [mile for mile in cursor]

        content = []

        init = time.time()
        last = init

        tblMode = False
        if arg_content:
            tblMode = True
            content.append('|| Milestone || Due || Days from now || Days from Previous || Completed || Description ||')

        for name, due, completed, descrip in miles:

            d = date.fromtimestamp(due)
            dd = d.strftime('%b %d, %Y')
            dc = ''
            if completed:
                d = date.fromtimestamp(completed)
                dc = d.strftime('%b %d, %Y')
            dl = int((due - init) / 86400)
            dp = int((due - last) / 86400)

            if not tblMode:
                dt = " '''%s'''" % name
                dt += " ''due %s [in %d days, for %d days]" % (dd, dl, dp)
                if completed:
                    dt += '; completed %s' % dc
                dt += "''::"

                content.append(dt)
                if descrip != None and descrip.strip() != '':
                    content.append('   %s' % descrip)
            else:
                dt = '||%s||%s||%d||%d||%s||%s||' % (name, dd, dl, dp, dc, descrip)
                content.append(dt)

            last = due

        content = '\n'.join(content)

        content = format_to_html(self.env, formatter.context, content)

        content = '<div class="milestone-list">%s</div>' % content

        # to avoid things like the above it might be nicer to use
        # Genshi tag() construction.

        return content
Code Example #29
def SEARCHFORAIRINGS(url,name):
    keyboard = xbmc.Keyboard('', __language__(21010))
    keyboard.doModal()
    if (keyboard.isConfirmed()):
        titleToSearchFor = keyboard.getText()
    else:
        return
    if(titleToSearchFor == "" or titleToSearchFor == None):
        return

    now = time.time()
    startRange = str(long(now * 1000))
    #url = strUrl + '/sagex/api?command=EvaluateExpression&1=FilterByRange(SearchByTitle("%s","T"),"GetAiringStartTime","%s",java_lang_Long_MAX_VALUE,true)&encoder=json' % (urllib2.quote(titleToSearchFor.encode("utf8")), startRange)
    #url = strUrl + '/sagex/api?command=EvaluateExpression&1=FilterByRange(SearchByTitle("%s","T"),"GetAiringStartTime",java_lang_Long_parseLong("%d"),java_lang_Long_MAX_VALUE,true)&encoder=json' % (urllib2.quote(titleToSearchFor.encode("utf8")), int(time.time()) * 1000)
    url = strUrl + '/sagex/api?command=EvaluateExpression&1=FilterByRange(SearchSelectedFields("%s",false,true,true,false,false,false,false,false,false,false,"T"),"GetAiringStartTime",java_lang_Long_parseLong("%d"),java_lang_Long_MAX_VALUE,true)&size=100&encoder=json' % (urllib2.quote(titleToSearchFor.encode("utf8")), int(time.time()) * 1000)
    airings = executeSagexAPIJSONCall(url, "Result")
    for airing in airings:
        show = airing.get("Show")
        strTitle = airing.get("AiringTitle")
        strTitleEncoded = strTitle.encode("utf8")
        strEpisode = show.get("ShowEpisode")
        if(strEpisode == None):
            strEpisode = ""        
        strDescription = show.get("ShowDescription")
        if(strDescription == None):
            strDescription = ""        
        strGenre = show.get("ShowCategoriesString")
        strAiringID = str(airing.get("AiringID"))
        seasonNum = int(show.get("ShowSeasonNumber"))
        episodeNum = int(show.get("ShowEpisodeNumber"))
        studio = airing.get("AiringChannelName")        
        isFavorite = airing.get("IsFavorite")
        
        startTime = float(airing.get("AiringStartTime") // 1000)
        strAiringdateObject = date.fromtimestamp(startTime)
        airTime = strftime('%H:%M', time.localtime(startTime))
        strAiringdate = "%02d.%02d.%s" % (strAiringdateObject.day, strAiringdateObject.month, strAiringdateObject.year)
        strOriginalAirdate = strAiringdate
        if(airing.get("OriginalAiringDate")):
            startTime = float(airing.get("OriginalAiringDate") // 1000)
            strOriginalAirdateObject = date.fromtimestamp(startTime)
            strOriginalAirdate = "%02d.%02d.%s" % (strOriginalAirdateObject.day, strOriginalAirdateObject.month, strOriginalAirdateObject.year)

        # if there is no episode name use the description in the title
        strDisplayText = strTitleEncoded
        if(strGenre.find("Movie")<0 and strGenre.find("Movies")<0 and strGenre.find("Film")<0 and strGenre.find("Shopping")<0 and strGenre.find("Consumer")<0):
            if(strEpisode == ""):
                if(strDescription != ""):
                    strDisplayText = strTitleEncoded + ' - ' + strDescription
                else:
                    strDisplayText = studio + " News - " + strftime('%a %b %d', time.localtime(startTime)) + " @ " + airTime
                    strDescription = strGenre
            else:
                strDisplayText = strTitleEncoded + ' - ' + strEpisode
        strDisplayText = strftime('%a %b %d', time.localtime(startTime)) + " @ " + airTime + ": " + strDisplayText
        addAiringLink(strDisplayText,'',strDescription,IMAGE_THUMB,strGenre,strOriginalAirdate,strAiringdate,strTitleEncoded,strAiringID,seasonNum,episodeNum,studio,isFavorite, airing.get("AiringStartTime"), airing.get("AiringEndTime"))

    xbmc.executebuiltin("Container.SetViewMode(504)")
Code Example #30
File: default.py Project: noba3/KoTos
 def is_in_current_week(self, strdate, alt=False):
     if alt:
         showdate = date.fromtimestamp(mktime(strptime(strdate, "%b/%d/%Y")))
     else:
         showdate = date.fromtimestamp(mktime(strptime(strdate, "%Y-%m-%d")))
     weekrange = int((showdate - self.today).days)
     if weekrange >= 0 and weekrange <= 6:
         return True
     else:
         return False
Code Example #31
#!/usr/bin/python

# based on the article found at http://bilgin.esme.org/BitsBytes/KalmanFilterforDummies.aspx

from datetime import date
from time import time
from math import sqrt
import pylab
from pylab import array, random
from yahoo_finance import Share

start_date = date.fromtimestamp(time() -
                                60 * 60 * 24 * 30).strftime('%Y-%m-%d')
end_date = date.today().strftime('%Y-%m-%d')
share = Share('TWTR')
share_history = share.get_historical(start_date, end_date)
readings = []
for day in share_history:
    readings.append(float(day['Close']))

# kalman filter implementation
# ============================
process_variance = 1e-5
estimated_measurement_variance = 0.1**2
posteri_estimate = 0.0
posteri_error_estimate = 1.0
estimates = []
estimate_errors = []

for reading in readings:
    # predict (time update), using the standard scalar Kalman filter equations
    priori_estimate = posteri_estimate
    priori_error_estimate = posteri_error_estimate + process_variance

    # update (measurement correction)
    blending_factor = priori_error_estimate / (priori_error_estimate + estimated_measurement_variance)
    posteri_estimate = priori_estimate + blending_factor * (reading - priori_estimate)
    posteri_error_estimate = (1 - blending_factor) * priori_error_estimate

    estimates.append(posteri_estimate)
    estimate_errors.append(posteri_error_estimate)
Code Example #32
File: test_utils.py Project: uedak/py3x
def test_Date_compat():
    d0 = date(2019, 12, 4)
    d1 = Date('2019-12-04')
    assert d1 == d0
    assert d0 == d1
    assert str(d1) == str(d0) == '2019-12-04'

    assert isinstance(d1, Date)
    assert not isinstance(d1, DateTime)
    assert isinstance(d1, date)
    assert not isinstance(d1, datetime)
    assert len({d0, d1}) == 1

    with pytest.raises(AttributeError) as e:
        d1.xxx
    assert e.value.args == ("'Date' object has no attribute 'xxx'", )

    with pytest.raises(AttributeError) as e:
        d1.yyy = 100
    assert e.value.args == ("'Date' object has no attribute 'yyy'", )

    d2 = Date('2019-12-06')
    dt0 = datetime(2019, 12, 4)
    dt2 = DateTime('2019-12-04 00:00:02')
    for x in (
        (d0, d0, dict(seconds=0)),
        (d2, d0, dict(days=2)),
        (dt0, d0, None),
        (dt2, d0, None),
        (d0, d2, dict(days=-2)),
        (d2, d2, dict(seconds=0)),
        (dt0, d2, None),
        (dt2, d2, None),
        (d0, dt0, None),
        (d2, dt0, None),
        (dt0, dt0, dict(seconds=0)),
        (dt2, dt0, dict(seconds=2)),
        (d0, dt2, None),
        (d2, dt2, None),
        (dt0, dt2, dict(days=-1, seconds=86398)),
        (dt2, dt2, dict(seconds=0)),
    ):
        # print(x)
        if x[2] is None:
            with pytest.raises(TypeError) as e:
                x[0] - x[1]
            assert e.value.args[0].startswith('unsupported operand')
        else:
            assert (x[0] - x[1]) == timedelta(**x[2])

    assert d1.ctime() == d0.ctime() == 'Wed Dec  4 00:00:00 2019'
    for m in (date.ctime, Date.ctime):
        with pytest.raises(TypeError):
            m()

    assert d1.day == d0.day == 4

    assert '%r' % Date.fromordinal(737397) == \
           '%r' % d1.fromordinal(737397) == "Date('2019-12-04')"
    assert '%r' % date.fromordinal(737397) == \
           '%r' % d0.fromordinal(737397) == 'datetime.date(2019, 12, 4)'

    e1 = d1.epoch()
    assert '%r' % Date.fromtimestamp(e1) == \
           '%r' % d1.fromtimestamp(e1) == "Date('2019-12-04')"
    assert '%r' % date.fromtimestamp(e1) == \
           '%r' % d0.fromtimestamp(e1) == 'datetime.date(2019, 12, 4)'

    assert d1.isocalendar() == d0.isocalendar() == (2019, 49, 3)
    for m in (Date.isocalendar, date.isocalendar):
        with pytest.raises(TypeError):
            m()

    assert d1.isoformat() == d0.isoformat() == '2019-12-04'
    for m in (Date.isoformat, date.isoformat):
        with pytest.raises(TypeError):
            m()

    assert d1.isoweekday() == d0.isoweekday() == 3
    for m in (Date.isoweekday, date.isoweekday):
        with pytest.raises(TypeError):
            m()

    assert '%r' % [d1.min, d1.max] == \
        "[Date('0001-01-01'), Date('9999-12-31')]"
    assert '%r' % [d0.min, d0.max] == \
        '[datetime.date(1, 1, 1), datetime.date(9999, 12, 31)]'
    assert d1.min == d0.min
    assert d0.max == d1.max

    assert d1.month == d0.month == 12

    assert '%r' % d1.replace(day=5) == "Date('2019-12-05')"
    assert '%r' % d0.replace(day=5) == 'datetime.date(2019, 12, 5)'
    for m in (Date.replace, date.replace):
        with pytest.raises(TypeError):
            m(day=5)

    assert d1.resolution == d0.resolution

    assert d1.strftime('%F') == d0.strftime('%F') == '2019-12-04'
    for m in (Date.strftime, date.strftime):
        with pytest.raises(TypeError):
            m('%F')

    assert d1.timetuple() == d0.timetuple()
    for m in (Date.timetuple, date.timetuple):
        with pytest.raises(TypeError):
            m()

    assert Date.today() == d1.today() == date.today() == d0.today()

    assert d1.toordinal() == d0.toordinal() == 737397
    for m in (Date.toordinal, date.toordinal):
        with pytest.raises(TypeError):
            m()

    assert d1.weekday() == d0.weekday() == 2
    for m in (Date.weekday, date.weekday):
        with pytest.raises(TypeError):
            m()

    assert d1.year == d0.year == 2019

    dt = DateTime.from_db('2020-02-01 00:00:00.000001')
    assert str(dt + 1) == '2020-02-01 00:00:01.000001'

    dt = DateTime.from_db('2020-02-01 00:00:00.1')
    assert str(dt + 1) == '2020-02-01 00:00:01.100000'
Code Example #33
File: iterator.py Project: fidemin/python-study
def day_grouper(iterable):
    key = lambda value: date.fromtimestamp(value[0])
    return groupby(iterable, key)
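
One caveat worth making explicit: itertools.groupby only groups adjacent items, so day_grouper assumes its (timestamp, value) pairs are already ordered by time. A self-contained sketch with made-up data (the printed dates assume a local timezone of UTC):

from datetime import date
from itertools import groupby

samples = [(0, 'a'), (3600, 'b'), (90000, 'c')]  # made-up, pre-sorted (timestamp, value) pairs
for day, group in groupby(samples, key=lambda v: date.fromtimestamp(v[0])):
    print(day, [v[1] for v in group])
# 1970-01-01 ['a', 'b']
# 1970-01-02 ['c']
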
Code Example #34
def time(data_time):
    data_time = data_time/1000.0
    timestamp = date.fromtimestamp(data_time)
    return timestamp.strftime("%a %m %d")
Code Example #35
from datetime import date
today = date.fromtimestamp(9999999999)
print(today)
"""
date.fromtimestamp() converts a POSIX timestamp (the number of
seconds since the 1970 epoch) into the corresponding local date.
Output:
2286-11-20
"""
Code Example #36
def createUserRecord():
    currentTime = time.time()
    today = date.fromtimestamp(currentTime)
    startTime = time.mktime((today - timedelta(days=1)).timetuple())

    socialHealthScores = recentSocialHealthScores()
    data = {}

    for guid, scores in socialHealthScores.iteritems():
        profile = Profile.objects.get(uuid=guid)
        dbName = "User_" + str(profile.id)
        data[guid] = {}
        collection = connection[dbName]["funf"]
        answerCollection = connection[dbName]["answer"]
        answerCollection.remove({"key": "UserRecord"})
        averageLows = {
            score["key"]: score["value"]
            for score in scores if score["layer"] == "averageLow"
        }
        averageHighs = {
            score["key"]: score["value"]
            for score in scores if score["layer"] == "averageHigh"
        }
        userScores = {
            score["key"]: score["value"]
            for score in scores if score["layer"] == "User"
        }
        print averageLows
        print averageHighs
        print userScores
        color = "none"
        color_fill = "#9ACD32"
        for metric, value in userScores.iteritems():
            if value < averageLows[metric] or value > averageHighs[metric]:
                color_fill = "#FF0000"
                color = "red"
        locations = [
            entry["value"] for entry in collection.find(
                {
                    "key": {
                        "$regex": "LocationProbe$"
                    },
                    "time": {
                        "$gte": startTime,
                        "$lt": currentTime
                    }
                },
                limit=200)
        ]
        locations = [
            value["location"] if "location" in value else value
            for value in locations
        ]

        timestampedlatlongs = [{
            "timestamp": int(value["timestamp"]),
            "lat": value["mlatitude"],
            "lng": value["mlongitude"]
        } for value in locations]
        data[guid]["user"] = "******"
        data[guid]["color"] = color
        data[guid]["color-fill"] = color_fill
        data[guid]["locations"] = timestampedlatlongs
        data[guid]["issharing"] = False
        data[guid]["photo"] = "/static/img/bbc_demo/locked.png"
        answer = {"key": "UserRecord"}
        answer["value"] = data[guid]
        answerCollection.save(answer)
    return data
Code Example #37
CITY_NAME = "DC"

# Get raw post data, read each separate line into an array
raw_post_times = map(
    int,
    open("./raw_data/post_times_" + CITY_NAME + ".txt").read().splitlines())

# Initialize the 2D array for the post frequencies
post_freqs = [[0 for col in range(NUM_HOURS_IN_DAY)]
              for row in range(NUM_DAYS_IN_WEEK)]

# Go through all the raw post times
for raw_post_time in raw_post_times:
    # Convert into UTC time and python date object, offsetting for timezone along the way
    post_time = time.gmtime(raw_post_time + TIME_HOUR_OFFSET * 60 * 60)
    post_date = date.fromtimestamp(raw_post_time + TIME_HOUR_OFFSET * 60 * 60)
    print post_time
    print post_date

    # Check if post is between Nov. 23 and Nov. 29 (for the purposes of this project only)
    if 23 <= post_time.tm_mday <= 29:
        # If it is, iterate the appropriate frequency element in the frequency array
        post_freqs[post_date.weekday()][post_time.tm_hour] += 1
    else:
        # Otherwise, take note of it (lots of these indicate the scrape produced a lot of unused data)
        print "Date out of range"
    print ""

# Save post frequencies
post_freq_file = open("./freq_data/post_freq_" + CITY_NAME + ".txt", "w")
np.savetxt(fname=post_freq_file, X=post_freqs, fmt='%d', delimiter=' ')
Code Example #38
File: win007.py Project: hxl95810/python
class Win007Spider(scrapy.Spider):
    name = 'win007'
    allowed_domains = ['www.win007.com']
    #     start_urls = ['http://score.nowscore.com/data/score.aspx?date=2017/9/1']
    start_urls = []
    # build the full list of date-paginated start URLs

    begin = date.fromtimestamp(
        int(time.mktime(time.strptime("2017-09-05", "%Y-%m-%d"))))
    end = date.fromtimestamp(
        int(time.mktime(time.strptime("2017-09-05", "%Y-%m-%d"))))

    #     end = date.today()

    for i in range((end - begin).days + 1):
        x_day = begin + datetime.timedelta(days=i)
        urls = 'http://score.nowscore.com/data/score.aspx?date=%s' % x_day
        start_urls.append(urls)

#matchStatus: 0 = not started, -1 = finished, -11 = to be determined, 12 = abandoned, 14 = postponed

    headers = {
        "HOST":
        "www.310win.com",
        'User-Agent':
        "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0"
    }

    handle_httpstatus_list = [404]

    def parse(self, response):

        tdate = parse.urlparse(response.url).query.split("=")[1]

        jbscore = response.text
        jbscore = jbscore.replace('\r\n', '')
        jbarr = jbscore.split(";")

        matchcount = int(re.sub("\D", "", jbarr[3])) + 5
        sclasscount = int(re.sub("\D", "", jbarr[4])) + matchcount

        jbmatch = jbarr[5:matchcount]  # match records
        jbsclass = jbarr[matchcount:sclasscount]  # match category (league) records

        a = []
        for matchrow in jbmatch:

            matchdata = re.sub('<[^>]+>', '', matchrow).split("=")
            matchdata[1] = matchdata[1].replace('[', '')
            matchdata[1] = matchdata[1].replace(']', '')
            matchdata[1] = matchdata[1].replace('\'', '')

            newmatchdata = matchdata[1].split(',')
            a.append(newmatchdata)
        b = []
        for sclassrow in jbsclass:

            sclassdata = re.sub('<[^>]+>', '', sclassrow).split("=")
            sclassdata[1] = sclassdata[1].replace('[', '')
            sclassdata[1] = sclassdata[1].replace(']', '')
            sclassdata[1] = sclassdata[1].replace('\'', '')

            newsclassdata = sclassdata[1].split(',')
            b.append(newsclassdata)

        for row in a:

            matchId = row[0]  # match id
            cnHome = row[4]  # home team name (simplified Chinese)
            hkHome = row[5]  # home team name (traditional Chinese)
            enHome = row[6]  # home team name (English)

            cnGuest = row[7]  # away team name (simplified Chinese)
            hkGuest = row[8]  # away team name (traditional Chinese)
            enGuest = row[9]  # away team name (English)

            matchTime = row[10]  # match time
            matchStatus = row[12]  # match status, -1 = finished

            homeGoal = row[13]  # home team goals
            guestGoal = row[14]  # away team goals
            matchHandicap = row[25]  # match handicap
            matchClass = b[int(row[1])][1]  # league category
            homeRanking = row[21]  # home team ranking
            guestRanking = row[22]  # away team ranking

            matchScroll = row[24]  # "True" means the match is offered in-play at SB
            matchTime = matchTime[0:8] + ' ' + matchTime[-5:]

            if matchHandicap == '' or matchStatus != '-1' or matchScroll != "True":
                continue

            get_url = 'http://www.310win.com/handicap/' + matchId + '.html'  # handicap odds detail page
            #             print (get_url)
            #             print (cnHome)
            #             print (matchClass)
            item_loader = Win007MatchItem()
            item_loader["date"] = tdate
            item_loader["matchId"] = matchId
            item_loader["cnHome"] = cnHome
            item_loader["hkHome"] = hkHome
            item_loader["enHome"] = enHome
            item_loader["cnGuest"] = cnGuest

            item_loader["hkGuest"] = hkGuest
            item_loader["enGuest"] = enGuest
            item_loader["matchTime"] = matchTime
            item_loader["homeGoal"] = homeGoal

            item_loader["guestGoal"] = guestGoal
            item_loader["matchHandicap"] = matchHandicap
            item_loader["matchClass"] = matchClass
            item_loader["homeRanking"] = homeRanking
            item_loader["guestRanking"] = guestRanking

            #             item_loader = TakeFirstItemLoader(item=Win007MatchItem(), response=response)
            #             item_loader = Win007MatchItem()
            #
            #             item_loader.add_value("date", tdate)
            #             item_loader.add_value("matchId", matchId)
            #             item_loader.add_value("cnHome", cnHome)
            #             item_loader.add_value("hkHome", hkHome)
            #             item_loader.add_value("enHome", enHome)
            #             item_loader.add_value("cnGuest", cnGuest)
            #             item_loader.add_value("hkGuest", hkGuest)
            #             item_loader.add_value("enGuest", enGuest)
            #             item_loader.add_value("matchTime", matchTime)
            #             item_loader.add_value("homeGoal", homeGoal)
            #             item_loader.add_value("guestGoal", guestGoal)
            #             item_loader.add_value("matchHandicap", matchHandicap)
            #             item_loader.add_value("matchClass", matchClass)
            #             item_loader.add_value("homeRanking", homeRanking)
            #             item_loader.add_value("guestRanking", guestRanking)
            #             item_loader = item_loader.load_item()

            yield scrapy.Request(get_url,
                                 headers=self.headers,
                                 callback=self.parse_detail,
                                 dont_filter=True)
            yield item_loader

#             yield Request(url=post_url, callback=self.parse_detail,dont_filter=True)

    def parse_detail(self, response):
        handicapUrl = parse.urlparse(response.url)

        i = len(handicapUrl.path) - 1
        while i > 0:
            if handicapUrl.path[i] == '/':
                break
            i = i - 1

        matchId = handicapUrl.path[i + 1:len(handicapUrl.path)].split('.')[0]

        # Opening handicap lines
        sb_starthandicap = response.css(
            "#odds > table > tr:nth-child(3) > td:nth-child(3) ::text"
        ).extract()[0]  # SB
        bet365_starthandicap = response.css(
            "#odds > table > tr:nth-child(4) > td:nth-child(3) ::text"
        ).extract()[0]  # bet365
        ladbrokes_starthandicap = response.css(
            "#odds > table > tr:nth-child(6) > td:nth-child(3) ::text"
        ).extract()[0]  # Ladbrokes
        betvictor_starthandicap = response.css(
            "#odds > table > tr:nth-child(7) > td:nth-child(3) ::text"
        ).extract()[0]  # BetVictor

        # Closing handicap lines
        sb_overhandicap = response.css(
            "#odds > table > tr:nth-child(3) > td:nth-child(6) ::text"
        ).extract()[0]  # SB
        bet365_overhandicap = response.css(
            "#odds > table > tr:nth-child(4) > td:nth-child(6) ::text"
        ).extract()[0]  # bet365
        ladbrokes_overhandicap = response.css(
            "#odds > table > tr:nth-child(6) > td:nth-child(6) ::text"
        ).extract()[0]  # Ladbrokes
        betvictor_overhandicap = response.css(
            "#odds > table > tr:nth-child(7) > td:nth-child(6) ::text"
        ).extract()[0]  # BetVictor

        # Opening odds (water levels)
        sb_homestartodds = response.css(
            "#odds > table > tr:nth-child(3) > td:nth-child(2) ::text"
        ).extract()[0].strip()  # SB opening odds (home)
        sb_gueststartodds = response.css(
            "#odds > table > tr:nth-child(3) > td:nth-child(4) ::text"
        ).extract()[0].strip()  # SB opening odds (away)
        bet365_homestartodds = response.css(
            "#odds > table > tr:nth-child(4) > td:nth-child(2) ::text"
        ).extract()[0].strip()  # bet365 opening odds (home)
        bet365_gueststartodds = response.css(
            "#odds > table > tr:nth-child(4) > td:nth-child(4) ::text"
        ).extract()[0].strip()  # bet365 opening odds (away)
        ladbrokes_homestartodds = response.css(
            "#odds > table > tr:nth-child(6) > td:nth-child(2) ::text"
        ).extract()[0].strip()  # Ladbrokes opening odds (home)
        ladbrokes_gueststartodds = response.css(
            "#odds > table > tr:nth-child(6) > td:nth-child(4) ::text"
        ).extract()[0].strip()  # Ladbrokes opening odds (away)
        betvictor_homestartodds = response.css(
            "#odds > table > tr:nth-child(7) > td:nth-child(2) ::text"
        ).extract()[0].strip()  # BetVictor opening odds (home)
        betvictor_gueststartodds = response.css(
            "#odds > table > tr:nth-child(7) > td:nth-child(4) ::text"
        ).extract()[0].strip()  # BetVictor opening odds (away)

        sb_homestartodds = float(sb_homestartodds or 0) * 1000
        sb_gueststartodds = float(sb_gueststartodds or 0) * 1000
        bet365_homestartodds = float(bet365_homestartodds or 0) * 1000
        bet365_gueststartodds = float(bet365_gueststartodds or 0) * 1000
        ladbrokes_homestartodds = float(ladbrokes_homestartodds or 0) * 1000
        ladbrokes_gueststartodds = float(ladbrokes_gueststartodds or 0) * 1000
        betvictor_homestartodds = float(betvictor_homestartodds or 0) * 1000
        betvictor_gueststartodds = float(betvictor_gueststartodds or 0) * 1000

        # Closing odds (water levels)
        sb_homeoverodds = response.css(
            "#odds > table > tr:nth-child(3) > td:nth-child(5) ::text"
        ).extract()[0].strip()  # SB closing odds (home)
        sb_guestoverodds = response.css(
            "#odds > table > tr:nth-child(3) > td:nth-child(7) ::text"
        ).extract()[0].strip()  # SB closing odds (away)
        bet365_homeoverodds = response.css(
            "#odds > table > tr:nth-child(4) > td:nth-child(5) ::text"
        ).extract()[0].strip()  # bet365 closing odds (home)
        bet365_guestoverodds = response.css(
            "#odds > table > tr:nth-child(4) > td:nth-child(7) ::text"
        ).extract()[0].strip()  # bet365 closing odds (away)
        ladbrokes_homeoverodds = response.css(
            "#odds > table > tr:nth-child(6) > td:nth-child(5) ::text"
        ).extract()[0].strip()  # Ladbrokes closing odds (home)
        ladbrokes_guestoverodds = response.css(
            "#odds > table > tr:nth-child(6) > td:nth-child(7) ::text"
        ).extract()[0].strip()  # Ladbrokes closing odds (away)
        betvictor_homeoverodds = response.css(
            "#odds > table > tr:nth-child(7) > td:nth-child(5) ::text"
        ).extract()[0].strip()  # BetVictor closing odds (home)
        betvictor_guestoverodds = response.css(
            "#odds > table > tr:nth-child(7) > td:nth-child(7) ::text"
        ).extract()[0].strip()  # BetVictor closing odds (away)

        sb_homeoverodds = float(sb_homeoverodds or 0) * 1000
        sb_guestoverodds = float(sb_guestoverodds or 0) * 1000
        bet365_homeoverodds = float(bet365_homeoverodds or 0) * 1000
        bet365_guestoverodds = float(bet365_guestoverodds or 0) * 1000
        ladbrokes_homeoverodds = float(ladbrokes_homeoverodds or 0) * 1000
        ladbrokes_guestoverodds = float(ladbrokes_guestoverodds or 0) * 1000
        betvictor_homeoverodds = float(betvictor_homeoverodds or 0) * 1000
        betvictor_guestoverodds = float(betvictor_guestoverodds or 0) * 1000

        sb_company = response.css(
            "#odds > table > tr:nth-child(3) > td:nth-child(1) ::text"
        ).extract()[0].replace('走地', '').strip()
        bet365_company = response.css(
            "#odds > table > tr:nth-child(4) > td:nth-child(1) ::text"
        ).extract()[0].replace('走地', '').strip()
        ladbrokes_company = response.css(
            "#odds > table > tr:nth-child(6) > td:nth-child(1) ::text"
        ).extract()[0].replace('走地', '').strip()
        betvictor_company = response.css(
            "#odds > table > tr:nth-child(7) > td:nth-child(1) ::text"
        ).extract()[0].replace('走地', '').strip()

        # Sanity check on the collected odds (disabled)
        #         if bet365_homestartodds+bet365_gueststartodds==1900:
        #             pass
        #         else:
        #             continue;

        size_url = 'http://www.310win.com/overunder/' + matchId + '.html'  # over/under odds page

        SbItem = Win007HandicapSbItem()
        SbItem["matchid"] = int(matchId)
        SbItem["company"] = sb_company
        SbItem["asian_homestartodds"] = sb_homestartodds
        SbItem["asian_gueststartodds"] = sb_gueststartodds
        SbItem["asian_starthandicap"] = sb_starthandicap
        SbItem["asian_homeoverodds"] = sb_homeoverodds
        SbItem["asian_guestoverodds"] = sb_guestoverodds
        SbItem["asian_overhandicap"] = sb_overhandicap

        Bet365Item = Win007HandicapBet365Item()
        Bet365Item["matchid"] = int(matchId)
        Bet365Item["company"] = bet365_company
        Bet365Item["asian_homestartodds"] = bet365_homestartodds
        Bet365Item["asian_gueststartodds"] = bet365_gueststartodds
        Bet365Item["asian_starthandicap"] = bet365_starthandicap
        Bet365Item["asian_homeoverodds"] = bet365_homeoverodds
        Bet365Item["asian_guestoverodds"] = bet365_guestoverodds
        Bet365Item["asian_overhandicap"] = bet365_overhandicap

        LadbrokesItem = Win007HandicapLadbrokesItem()
        LadbrokesItem["matchid"] = int(matchId)
        LadbrokesItem["company"] = ladbrokes_company
        LadbrokesItem["asian_homestartodds"] = ladbrokes_homestartodds
        LadbrokesItem["asian_gueststartodds"] = ladbrokes_gueststartodds
        LadbrokesItem["asian_starthandicap"] = ladbrokes_starthandicap
        LadbrokesItem["asian_homeoverodds"] = ladbrokes_homeoverodds
        LadbrokesItem["asian_guestoverodds"] = ladbrokes_guestoverodds
        LadbrokesItem["asian_overhandicap"] = ladbrokes_overhandicap

        BetvictorItem = Win007HandicapBetvictorItem()
        BetvictorItem["matchid"] = int(matchId)
        BetvictorItem["company"] = betvictor_company
        BetvictorItem["asian_homestartodds"] = betvictor_homestartodds
        BetvictorItem["asian_gueststartodds"] = betvictor_gueststartodds
        BetvictorItem["asian_starthandicap"] = betvictor_starthandicap
        BetvictorItem["asian_homeoverodds"] = betvictor_homeoverodds
        BetvictorItem["asian_guestoverodds"] = betvictor_guestoverodds
        BetvictorItem["asian_overhandicap"] = betvictor_overhandicap

        request = scrapy.Request(size_url,
                                 headers=self.headers,
                                 callback=self.parse_size,
                                 dont_filter=True)
        request.meta['SbItem'] = SbItem
        request.meta['Bet365Item'] = Bet365Item
        request.meta['LadbrokesItem'] = LadbrokesItem
        request.meta['BetvictorItem'] = BetvictorItem
        yield request

#         yield scrapy.Request(get_url, headers=self.headers,callback=self.parse_detail,dont_filter=True)

    def parse_size(self, response):

        sb_startsize = response.css("#td_22::text").extract()[0]
        bet365_startsize = response.css("#td_42::text").extract()[0]
        ladbrokes_startsize = response.css("#td_32::text").extract()[0]
        betvictor_startsize = response.css("#td_62::text").extract()[0]

        sb_oversize = response.css("#td_25::text").extract()[0]
        bet365_oversize = response.css("#td_45::text").extract()[0]
        ladbrokes_oversize = response.css("#td_35::text").extract()[0]
        betvictor_oversize = response.css("#td_65::text").extract()[0]

        sb_homestarsize = response.css("#td_21::text").extract()[0]
        sb_gueststarsize = response.css("#td_23::text").extract()[0]
        bet365_homestarsize = response.css("#td_41::text").extract()[0]
        bet365_gueststarsize = response.css("#td_43::text").extract()[0]
        ladbrokes_homestarsize = response.css("#td_31::text").extract()[0]
        ladbrokes_gueststarsize = response.css("#td_33::text").extract()[0]
        betvictor_homestarsize = response.css("#td_61::text").extract()[0]
        betvictor_gueststarsize = response.css("#td_63::text").extract()[0]

        sb_homeoversize = response.css("#td_24::text").extract()[0]
        sb_guestoversize = response.css("#td_26::text").extract()[0]
        bet365_homeoversize = response.css("#td_44::text").extract()[0]
        bet365_guestoversize = response.css("#td_46::text").extract()[0]
        ladbrokes_homeoversize = response.css("#td_34::text").extract()[0]
        ladbrokes_guestoversize = response.css("#td_36::text").extract()[0]
        betvictor_homeoversize = response.css("#td_64::text").extract()[0]
        betvictor_guestoversize = response.css("#td_66::text").extract()[0]

        #                 $sb_homeoversize = pq("#odds tr:eq(3) td:eq(4)")->text() * 1000;
        #                 $sb_guestoversize = pq("#odds tr:eq(3) td:eq(6)")->text() * 1000;
        #                 $bet365_homeoversize = pq("#odds tr:eq(5) td:eq(4)")->text() * 1000;
        #                 $bet365_guestoversize = pq("#odds tr:eq(5) td:eq(6)")->text() * 1000;
        #                 $ladbrokes_homeoversize = pq("#odds tr:eq(4) td:eq(4)")->text() * 1000;
        #                 $ladbrokes_guestoversize = pq("#odds tr:eq(4) td:eq(6)")->text() * 1000;
        #                 $betvictor_homeoversize = pq("#odds tr:eq(7) td:eq(4)")->text() * 1000;
        #                 $betvictor_guestoversize = pq("#odds tr:eq(7) td:eq(6)")->text() * 1000;

        SbItem = response.meta['SbItem']
        SbItem["size_homestartodds"] = sb_homestarsize
        SbItem["size_gueststartodds"] = sb_gueststarsize
        SbItem["size_starthandicap"] = sb_startsize
        SbItem["size_homeoverodds"] = sb_homeoversize
        SbItem["size_guestoverodds"] = sb_guestoversize
        SbItem["size_overhandicap"] = sb_oversize

        Bet365Item = response.meta['Bet365Item']
        Bet365Item["size_homestartodds"] = bet365_homestarsize
        Bet365Item["size_gueststartodds"] = bet365_gueststarsize
        Bet365Item["size_starthandicap"] = bet365_startsize
        Bet365Item["size_homeoverodds"] = bet365_homeoversize
        Bet365Item["size_guestoverodds"] = bet365_guestoversize
        Bet365Item["size_overhandicap"] = bet365_oversize

        LadbrokesItem = response.meta['LadbrokesItem']
        LadbrokesItem["size_homestartodds"] = ladbrokes_homestarsize
        LadbrokesItem["size_gueststartodds"] = ladbrokes_gueststarsize
        LadbrokesItem["size_starthandicap"] = ladbrokes_startsize
        LadbrokesItem["size_homeoverodds"] = ladbrokes_homeoversize
        LadbrokesItem["size_guestoverodds"] = ladbrokes_guestoversize
        LadbrokesItem["size_overhandicap"] = ladbrokes_oversize

        BetvictorItem = response.meta['BetvictorItem']
        BetvictorItem["size_homestartodds"] = betvictor_homestarsize
        BetvictorItem["size_gueststartodds"] = betvictor_gueststarsize
        BetvictorItem["size_starthandicap"] = betvictor_startsize
        BetvictorItem["size_homeoverodds"] = betvictor_homeoversize
        BetvictorItem["size_guestoverodds"] = betvictor_guestoversize
        BetvictorItem["size_overhandicap"] = betvictor_oversize

        yield SbItem
        yield Bet365Item
        yield LadbrokesItem
        yield BetvictorItem
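The matchStatus comment near the top of the spider lists the raw status codes the feed uses. A small hypothetical helper (the mapping and function name are mine, not part of the spider) that turns those codes into readable labels:

MATCH_STATUS = {
    '0': 'not started',
    '-1': 'finished',
    '-11': 'to be decided',
    '12': 'abandoned',
    '14': 'postponed',
}

def describe_status(match_status):
    # Return a readable label for a raw matchStatus code (kept as a string, as in the feed).
    return MATCH_STATUS.get(match_status, 'unknown (%s)' % match_status)

print(describe_status('-1'))  # finished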
コード例 #39
0
def date_of_file(filename):
    return date.fromtimestamp(getmtime(filename))
コード例 #40
0
ファイル: ksuid.py プロジェクト: lepy/KSUID
    def getDatetime(self):
        """ getDatetime() returns a python date object which represents the approximate time
        that the ksuid was created """

        unixTime = self.getTimestamp()
        return date.fromtimestamp(unixTime)
コード例 #41
0
ファイル: reel_widgets.py プロジェクト: uniphil/jk-1
    def update(self, reel):
        # self.description.config(text=reel.description)
        self.loaded_label.config(
            text='Loaded {:%b %d}'.format(date.fromtimestamp(reel.loaded_at)))
        self.current_frame_number.config(text=self.current_frame.get())
コード例 #42
0
    def handle(self, raw_file):
        print self.label, "reading", raw_file
        try:
            record_count = 0
            bytes_read = 0
            start = datetime.now()
            for len_path, len_data, timestamp, path, data, err in fileutil.unpack(raw_file):
                record_count += 1
                self.records_read += 1
                if err:
                    print self.label, "ERROR: Found corrupted data for record", record_count, "in", raw_file, "path:", path, "Error:", err
                    self.bad_records += 1
                    continue
                if len(data) == 0:
                    print self.label, "ERROR: Found empty data for record", record_count, "in", raw_file, "path:", path
                    self.bad_records += 1
                    continue

                # Incoming timestamps are in milliseconds, so convert to POSIX first
                # (i.e. seconds)
                submission_date = date.fromtimestamp(timestamp / 1000).strftime("%Y%m%d")
                path = unicode(path, errors="replace")
                #print "Path for record", record_count, path, "length of data:", len_data

                if data[0] != "{":
                    # Data looks weird, should be JSON.
                    print self.label, "Warning: Found unexpected data for record", record_count, "in", raw_file, "path:", path, "data:"
                    print data
                else:
                    # Raw JSON, make sure we treat it as unicode.
                    data = unicode(data, errors="replace")

                current_bytes = len_path + len_data + fileutil.RECORD_PREAMBLE_LENGTH["v1"]
                bytes_read += current_bytes
                self.bytes_read += current_bytes
                path_components = path.split("/")
                if len(path_components) != self.expected_dim_count:
                    # We're going to pop the ID off, but we'll also add the
                    # submission date, so it evens out.
                    print self.label, "Found an invalid path in record", record_count, path
                    continue

                key = path_components.pop(0)
                info = {}
                info["reason"] = path_components.pop(0)
                info["appName"] = path_components.pop(0)
                info["appVersion"] = path_components.pop(0)
                info["appUpdateChannel"] = path_components.pop(0)
                info["appBuildID"] = path_components.pop(0)
                dims = self.schema.dimensions_from(info, submission_date)

                try:
                    # Convert data:
                    if self.converter is None:
                        serialized_data = data
                        data_version = 1
                    else:
                        parsed_data, parsed_dims = self.converter.convert_json(data, dims[-1])
                        # TODO: take this out if it's too slow
                        for i in range(len(dims)):
                            if dims[i] != parsed_dims[i]:
                                print self.label, "Record", self.records_read, "mismatched dimension", i, dims[i], "!=", parsed_dims[i]
                        serialized_data = self.converter.serialize(parsed_data)
                        dims = parsed_dims
                        data_version = 2
                    try:
                        # Write to persistent storage
                        n = self.storage.write(key, serialized_data, dims, data_version)
                        self.bytes_written += len(key) + len(serialized_data) + 1
                        self.records_written += 1
                        # Compress rotated files as we generate them
                        if n.endswith(StorageLayout.PENDING_COMPRESSION_SUFFIX):
                            self.q_out.put(n)
                    except Exception, e:
                        self.write_bad_record(key, dims, serialized_data, str(e), "ERROR Writing to output file:")
                except BadPayloadError, e:
                    self.write_bad_record(key, dims, data, e.msg, "Bad Payload:")
                except Exception, e:
                    err_message = str(e)

                    # We don't need to write these bad records out - we know
                    # why they are being skipped.
                    if err_message != "Missing in payload: info.revision":
                        # TODO: recognize other common failure modes and handle them gracefully.
                        self.write_bad_record(key, dims, data, err_message, "Conversion Error:")
                        traceback.print_exc()
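The handler converts millisecond timestamps to a YYYYMMDD submission date by dividing by 1000 before calling date.fromtimestamp(). A standalone sketch of just that step (the function name is mine):

from datetime import date

def submission_date_from_millis(timestamp_ms):
    # date.fromtimestamp() expects seconds, so drop the millisecond part first.
    return date.fromtimestamp(timestamp_ms / 1000).strftime("%Y%m%d")

print(submission_date_from_millis(1388534400000))  # a date in early 2014 (local time)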
コード例 #43
0
def timestamp_to_date(timestamp):
    return date.fromtimestamp(timestamp)
コード例 #44
0
ファイル: de.py プロジェクト: nowherenearithaca/juriscraper
    def _get_case_dates(self):
        path = "{base}td[5]/text()".format(base=self.base_path)
        case_dates = []
        for t in self.html.xpath(path):
            case_dates.append(date.fromtimestamp(time.mktime(time.strptime(t, '%m/%d/%y'))))
        return case_dates
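The strptime/mktime/fromtimestamp round trip above is a common idiom in these scrapers; parsing straight to a date with datetime.strptime gives the same result without going through a timestamp. A minimal equivalent sketch:

from datetime import datetime

def parse_case_date(text):
    # Equivalent to date.fromtimestamp(time.mktime(time.strptime(text, '%m/%d/%y')))
    return datetime.strptime(text, '%m/%d/%y').date()

print(parse_case_date('11/02/95'))  # 1995-11-02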
コード例 #45
0
def estimateTimes():
    profiles = Profile.objects.all()
    currentTime = time.time()
    today = date.fromtimestamp(currentTime)
    startTime = time.mktime((today - timedelta(days=7)).timetuple())

    for profile in profiles:
        dbName = "User_" + str(profile.id)
        answerListCollection = connection[dbName]["answerlist"]
        funf = connection[dbName]["funf"]
        recentPlaces = answerListCollection.find({"key": "RecentPlaces"})
        startTimes = {}
        endTimes = {}
        if recentPlaces.count() > 0:
            recentPlaces = recentPlaces[0]
            print recentPlaces["value"]
            for place in [p for p in recentPlaces["value"]]:
                print place
                placeKey = place["key"]
                startTimes[placeKey] = []
                endTimes[placeKey] = []
                print profile.uuid, placeKey
                bounds = place["bounds"]
                for intervalStart in [
                        t for t in range(int(startTime),
                                         int(time.mktime(today.timetuple())),
                                         3600 * 24)
                        if date.fromtimestamp(t).weekday() < 5
                ]:
                    print intervalStart, intervalStart + 24 * 3600
                    locations = [(entry["time"], entry["value"])
                                 for entry in funf.find(
                                     {
                                         "key": {
                                             "$regex": "LocationProbe$"
                                         },
                                         "time": {
                                             "$gte": intervalStart,
                                             "$lt": intervalStart + 24 * 3600
                                         }
                                     },
                                     limit=1000)]
                    if len(locations) > 0:
                        print len(locations)
                        #                        workLocations = [(value[0], value[1]) for value in workLocations]
                        #                        pdb.set_trace()
                        locations = [(value[0], (value[1]["mlatitude"],
                                                 value[1]["mlongitude"]))
                                     for value in locations]
                        atWork = [(loc[0], boxContainsPoint(bounds, loc[1]))
                                  for loc in locations]
                        deltas = reduce(
                            lambda x, y: x + [y]
                            if len(x) == 0 or x[-1][1] != y[1] else x, atWork,
                            [])
                        #workTimes = [value[0] for value in workLocations if boxContainsPoint(work["bounds"], value[1])]
                        if len(deltas) > 1:
                            if placeKey == "home":
                                print deltas
                                startTimes[placeKey].append(
                                    max([v[0] for v in deltas if not v[1]]) -
                                    intervalStart)
                                if 18000 < min([v[0] for v in deltas if v[1]
                                                ]) - intervalStart < 72000:
                                    endTimes[placeKey].append(
                                        min([v[0] for v in deltas if v[1]]) -
                                        intervalStart)
                            else:
                                startTimes[placeKey].append(
                                    min([v[0] for v in deltas if not v[1]]) -
                                    intervalStart)
                                endTimes[placeKey].append(
                                    max([v[0] for v in deltas if v[1]]) -
                                    intervalStart)
                if len(startTimes[placeKey]) > 0 and len(
                        endTimes[placeKey]) > 0:
                    averageStartTime = sum(startTimes[placeKey]) / (
                        3600 * len(startTimes[placeKey]))
                    averageEndTime = sum(
                        endTimes[placeKey]) / (3600 * len(endTimes[placeKey]))
                    recentPlaces["value"].remove(place)
                    place["start"] = averageStartTime
                    place["end"] = averageEndTime
                    recentPlaces["value"].append(place)
                    answerListCollection.save(recentPlaces)
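The reduce() call above collapses the per-sample (time, at_place) pairs down to the points where the boolean flips; itertools.groupby expresses the same run-length idea more directly. A small sketch on made-up samples, keeping only the first sample of each run:

from itertools import groupby

# made-up (timestamp, at_place) samples: False, False, True, True, False
at_work = [(100, False), (160, False), (220, True), (280, True), (340, False)]

# keep only the first sample of each run of identical booleans,
# i.e. the points where presence at the place flips
deltas = [next(group) for _, group in groupby(at_work, key=lambda pair: pair[1])]
print(deltas)  # [(100, False), (220, True), (340, False)]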
コード例 #46
0
                        & ~data.智道分类.str.contains('再研究')
                        & ~data.智道分类.str.contains('看不懂')
                        & data.市场.str.contains('A股')]

        except:
            pass
        data = data[~data.股票代码.isin(no_symbol)]
        # data = data[~data.股票代码.isin(no_symbol)]
        data['year'] = sheet_name
        buy_signal = buy_signal.append(data[['year', '股票代码', '股票名称']])

    buy_signal = buy_signal.applymap(lambda x: apply_code(str(x)))
    buy_signal['timestamp'] = buy_signal.year.apply(
        lambda x: StockTradeDay.query_data(
            start_timestamp=str(x) + relocation_date, limit=1).timestamp.
        values[0] if str(x) + relocation_date <= date.fromtimestamp(time.time(
        )).strftime("%Y%m%d") else np.NaN)
    buy_signal = buy_signal.query("timestamp <= @end")
    code_dict = {}
    for stock_data in buy_signal[['股票代码', 'timestamp',
                                  '股票名称']].to_dict('records'):
        stock_n = stock_data['股票代码'] + '_' + stock_data['股票名称']
        if stock_n not in code_dict:
            code_dict.update({stock_n: stock_data['timestamp']})
        else:
            if code_dict[stock_n] >= stock_data['timestamp']:
                code_dict.update({stock_n: stock_data['timestamp']})
buy_signal['CODES'] = buy_signal['股票代码'] + '_' + buy_signal['股票名称']
list(set(buy_signal['股票代码'].tolist()))
data = BlockStock.query_data(filters=[
    BlockStock.stock_code.in_(list(set(buy_signal['股票代码'].tolist()))),
    BlockStock.block_type == 'swl1'
コード例 #47
0
ファイル: ca10.py プロジェクト: uglyboxer/juriscraper
    def _get_case_dates(self):
        dates = []
        for date_string in self.html.xpath("{}/td[@class='publish-date']/text()".format(self.base)):
            # ex: Nov-02-1995
            dates.append(date.fromtimestamp(time.mktime(time.strptime(date_string, '%b-%d-%Y'))))
        return dates
コード例 #48
0
ファイル: langstats.py プロジェクト: staticmem/rockbox
def main():
    if len(sys.argv) > 1:
        if sys.argv[1] == '--help':
            printhelp()
            sys.exit(0)
    pretty = 0  # default to wiki-table output unless --pretty is given
    if len(sys.argv) > 1:
        if sys.argv[1] == '--pretty':
            pretty = 1

    # get svnpaths to temporary folder
    workfolder = tempfile.mkdtemp() + "/"
    getsources(svnserver, svnpaths, workfolder)

    projectfolder = workfolder + langbase
    # lupdate translations and drop all obsolete translations
    subprocess.Popen(["lupdate-qt4", "-no-obsolete", "rbutilqt.pro"], \
            stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=projectfolder).communicate()
    # lrelease translations to get status
    output = subprocess.Popen(["lrelease-qt4", "rbutilqt.pro"], stdout=subprocess.PIPE, \
            stderr=subprocess.PIPE, cwd=projectfolder).communicate()
    lines = re.split(r"\n", output[0])

    re_updating = re.compile(r"^Updating.*")
    re_generated = re.compile(r"Generated.*")
    re_ignored = re.compile(r"Ignored.*")
    re_qmlang = re.compile(r"'.*/rbutil_(.*)\.qm'")
    re_qmbase = re.compile(r"'.*/(rbutil_.*)\.qm'")
    re_genout = re.compile(
        r"[^0-9]([0-9]+) .*[^0-9]([0-9]+) .*[^0-9]([0-9]+) ")
    re_ignout = re.compile(r"([0-9]+) ")

    # print header
    titlemax = 0
    for l in langs:
        cur = len(langs[l])
        if titlemax < cur:
            titlemax = cur

    if pretty == 1:
        delim = "+-" + titlemax * "-" \
                + "-+-------+-----+-----+-----+-----+--------------------+-----------------+"
        head = "| Language" + (titlemax - 8) * " " \
                + " |  Code |Trans| Fin |Unfin| Untr|       Updated      |       Done      |"
        print delim
        print "|" + " " * (len(head) / 2 - 3) + str(gettrunkrev(svnserver)) \
                + " " * (len(head) / 2 - 4) + "|"
        print delim
        print head
        print delim
    else:
        print "|  *Translation status as of revision " + str(
            gettrunkrev(svnserver)) + "*  ||||||||"
        print "| *Language* | *Language Code* | *Translations* | *Finished* | " \
        "*Unfinished* | *Untranslated* | *Updated* | *Done* |"

    client = pysvn.Client()
    # scan output
    i = 0
    tslateststamp = 0
    tsoldeststamp = time.time()
    while i < len(lines):
        line = lines[i]
        if re_updating.search(line):
            lang = re_qmlang.findall(line)
            tsfile = "lang/" + re_qmbase.findall(line)[0] + ".ts"
            fileinfo = client.info2(svnserver + langbase + tsfile)[0][1]
            tsrev = fileinfo.last_changed_rev.number
            tsdate = date.fromtimestamp(fileinfo.last_changed_date).isoformat()
            if fileinfo.last_changed_date > tslateststamp:
                tslateststamp = fileinfo.last_changed_date
            if fileinfo.last_changed_date < tsoldeststamp:
                tsoldeststamp = fileinfo.last_changed_date

            line = lines[i + 1]
            if re_generated.search(line):
                values = re_genout.findall(line)
                translations = string.atoi(values[0][0])
                finished = string.atoi(values[0][1])
                unfinished = string.atoi(values[0][2])
                line = lines[i + 2]
                if not line.strip():
                    line = lines[i + 3]
                if re_ignored.search(line):
                    ignored = string.atoi(re_ignout.findall(line)[0])
                else:
                    ignored = 0
            if langs.has_key(lang[0]):
                name = langs[lang[0]].strip()
            else:
                name = '(unknown)'

            percent = (float(finished + unfinished) * 100 /
                       float(translations + ignored))
            bar = "#" * int(percent / 10)
            if (percent % 10) > 5:
                bar += "+"
            bar += " " * (10 - len(bar))
            if pretty == 1:
                fancylang = lang[0] + " " * (5 - len(lang[0]))
            else:
                fancylang = lang[0]
            tsversion = str(tsrev) + " (" + tsdate + ")"
            status = [
                fancylang, translations, finished, unfinished, ignored,
                tsversion, percent, bar
            ]
            if pretty == 1:
                thisname = name + (titlemax - len(name)) * " "
                print "| " + thisname + " | %5s | %3s | %3s | %3s | %3s | %6s | %3i%% %s |" % tuple(
                    status)
            else:
                if percent > 90:
                    color = '%%GREEN%%'
                else:
                    if percent > 50:
                        color = '%%ORANGE%%'
                    else:
                        color = '%%RED%%'

                text = "| " + name + " | %s | %s | %s | %s | %s | %s | " + color + "%3i%%%%ENDCOLOR%% %s |"
                print text % tuple(status)
        i += 1

    if pretty == 1:
        print delim

    print "Last language updated on " + date.fromtimestamp(
        tslateststamp).isoformat()
    print "Oldest language update was " + date.fromtimestamp(
        tsoldeststamp).isoformat()
    shutil.rmtree(workfolder)
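The percent and bar values drive both output formats above. As a quick illustration of that computation in isolation (the counts are made up):

# made-up counts: 210 finished + 20 unfinished strings, 230 translatable + 10 ignored
translations, finished, unfinished, ignored = 230, 210, 20, 10

percent = float(finished + unfinished) * 100 / float(translations + ignored)
bar = "#" * int(percent / 10)
if (percent % 10) > 5:
    bar += "+"
bar += " " * (10 - len(bar))
print("%3i%% [%s]" % (percent, bar))  # " 95% [#########+]"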
コード例 #49
0
    def get_release_date(self):
        return date.fromtimestamp(self.released / 1000)
コード例 #50
0
ファイル: filecreation.py プロジェクト: satee143/Python-oops
from datetime import date
from pathlib import Path

p = Path('/storage/emulated/0/oops2/')
#print(type(p))
#for i in p.iterdir():
#	print(i.stat())
#print(date.today())
f = [(i, date.today() - date.fromtimestamp(i.stat()[-2])) for i in p.iterdir()]
print(type(f))
for x, y in f:
    print(type(x))
    #print('{:20} {} days before.'.format(x.name, y.days))
コード例 #51
0
ファイル: date_demo.py プロジェクト: gzgdouru/python_module
from datetime import date
import time

if __name__ == "__main__":
    a = date.today()
    print(a.year, a.month, a.day)
    print("-" * 10)

    a = date(2017, 3, 1)
    b = date(2017, 3, 17)
    print(a > b)
    print((b - a).days)
    print("-" * 100)

    a = date.today()
    print(a.isocalendar())
    print(a.isoformat())
    print(a.isoweekday())
    print(a.weekday())
    print("-" * 100)

    a = date.today()
    print(a.timetuple())
    print(a.toordinal())
    print(a.ctime())
    print(date.fromtimestamp(time.time()))
    print("-" * 100)

コード例 #52
0
def extract_infos(event):
    text_date = date.fromtimestamp(event['date'])
    text_length = len(event['text'])
    text_userid = event['from']['peer_id']
    text_printname = event['from']['print_name']
    return text_date, text_length, text_userid, text_printname
コード例 #53
0
def dateTest():
    print('date.max:', date.max)
    print('date.min:', date.min)
    today = date.today()
    print('date.today():', today)
    print('date.fromtimestamp():', date.fromtimestamp(time.time()))
コード例 #54
0
        if statistics['total_appointments'] != 0:
            statistics['start_to_end_appointment_diff'] = trace['appointments'][statistics['total_appointments'] - 1]['Visit day'] - trace['appointments'][0]['Visit day']

            statistics['mean_days_in_waiting_list'] = round(stat.mean([int(appointment['relative_visit_day']) - appointment['relative_waiting_list_entry_date'] for appointment in trace['appointments']]), 3)
            try:
                statistics['mean_days_in_waiting_list_without_cancelled'] = round(stat.mean([int(appointment['relative_visit_day']) - appointment['relative_waiting_list_entry_date'] for appointment in trace['appointments'] if appointment['Visit status'] != 'Cancelled HS' and appointment['Visit status'] != 'Cancelled Pat']), 3)
            except:
                pass

        statistics['elapsed_time_between_appointments'] = []
        for i in range(0, statistics['total_appointments'] - 1):
            dict = {}
            dict["first_appointment"] = trace['appointments'][i]['Visit day']
            dict["second_appointment"] = trace['appointments'][i+1]['Visit day']
            dict["elapsed_time"] = (date.fromtimestamp(int(dict["second_appointment"]/1000)) - date.fromtimestamp(int(dict["first_appointment"]/1000))).days
            statistics['elapsed_time_between_appointments'].append(dict)

        statistics['elapsed_time_between_appointments_without_cancelled'] = []
        k = 0
        while k < statistics['total_appointments']:
            dict = {}
            if not (trace['appointments'][k]['Visit status'] == "Cancelled Pat" or trace['appointments'][k]['Visit status'] == "Cancelled HS"):
                j = k + 1
                while j < statistics['total_appointments']:
                    if not (trace['appointments'][j]['Visit status'] == "Cancelled Pat" or trace['appointments'][j]['Visit status'] == "Cancelled HS"):
                        dict["first_appointment"] = trace['appointments'][k]['Visit day']
                        dict["second_appointment"] = trace['appointments'][j]['Visit day']
                        dict["elapsed_time"] = (date.fromtimestamp(int(dict["second_appointment"] / 1000)) -
                                                date.fromtimestamp(int(dict["first_appointment"] / 1000))).days
                        statistics['elapsed_time_between_appointments_without_cancelled'].append(dict)
コード例 #55
0
def main():

    if len(argv) < 10:
        print 'not enough parameters'
        print 'usage: account_analysis.py {book url} {start year} {start month, numeric} {period type: monthly, quarterly, or yearly} {number of periods to show, from start year and month} {whether to show debits: debits-show for true, all other values false} {whether to show credits: credits-show for true, all other values false} {space separated account path, as many nested levels as desired} '
        print 'examples:\n'
        print "The following example analyzes 12 months of 'Assets:Test Account' from /home/username/test.gnucash, starting in January of 2010, and shows both credits and debits"
        print "gnucash-env python account_analysis.py '/home/username/test.gnucash' 2010 1 monthly 12 debits-show credits-show Assets 'Test Account'\n"
        print "The following example analyzes 2 quarters of 'Liabilities:First Level:Second Level' from /home/username/test.gnucash, starting March 2011, and shows credits but not debits"
        print "gnucash-env python account_analysis.py '/home/username/test.gnucash' 2011 3 quarterly 2 debits-noshow credits-show Liabilities 'First Level' 'Second Level"
        return

    try:
        (gnucash_file, start_year, start_month, period_type, periods,
         debits_show, credits_show) = argv[1:8]
        start_year, start_month, periods = [
            int(blah) for blah in (start_year, start_month, periods)
        ]

        debits_show = debits_show == DEBITS_SHOW
        credits_show = credits_show == CREDITS_SHOW

        account_path = argv[8:]

        gnucash_session = Session(gnucash_file, is_new=False)
        root_account = gnucash_session.book.get_root_account()
        account_of_interest = account_from_path(root_account, account_path)

        # a list of all the periods of interest, for each period
        # keep the start date, end date, a list to store debits and credits,
        # and sums for tracking the sum of all debits and sum of all credits
        period_list = [
            [
                start_date,
                end_date,
                [],  # debits
                [],  # credits
                ZERO,  # debits sum
                ZERO,  # credits sum
            ] for start_date, end_date in generate_period_boundaries(
                start_year, start_month, period_type, periods)
        ]
        # a copy of the above list with just the period start dates
        period_starts = [e[0] for e in period_list]

        # insert and add all splits in the periods of interest
        for split in account_of_interest.GetSplitList():
            trans = split.parent
            trans_date = date.fromtimestamp(trans.GetDate())

            # use binary search to find the period that starts before or on
            # the transaction date
            period_index = bisect_right(period_starts, trans_date) - 1

            # ignore transactions with a date before the matching period start
            # (after subtracting 1 above start_index would be -1)
            # and after the last period_end
            if period_index >= 0 and \
                    trans_date <= period_list[len(period_list)-1][1]:

                # get the period bucket appropriate for the split in question
                period = period_list[period_index]

                # more specifically, we'd expect the transaction date
                # to be on or after the period start, and before or on the
                # period end, assuming the binary search (bisect_right)
                # assumptions from above are right.
                #
                # in other words, we assert our use of binary search
                # and the filtered results from the above if provide all the
                # protection we need
                assert (trans_date >= period[0] and trans_date <= period[1])

                split_amount = gnc_numeric_to_python_Decimal(split.GetAmount())

                # if the amount is negative, this is a credit
                if split_amount < ZERO:
                    debit_credit_offset = 1
                # else a debit
                else:
                    debit_credit_offset = 0

                # store the debit or credit Split with its transaction, using the
                # above offset to get in the right bucket
                #
                # if we wanted to be really cool we'd keep the transactions
                period[2 + debit_credit_offset].append((trans, split))

                # add the debit or credit to the sum, using the above offset
                # to get in the right bucket
                period[4 + debit_credit_offset] += split_amount

        csv_writer = csv.writer(stdout)
        csv_writer.writerow(
            ('period start', 'period end', 'debits', 'credits'))

        def generate_detail_rows(values):
            return (('', '', '', '', trans.GetDescription(),
                     gnc_numeric_to_python_Decimal(split.GetAmount()))
                    for trans, split in values)


        for start_date, end_date, debits, credits, debit_sum, credit_sum in \
                period_list:
            csv_writer.writerow((start_date, end_date, debit_sum, credit_sum))

            if debits_show and len(debits) > 0:
                csv_writer.writerow(
                    ('DEBITS', '', '', '', 'description', 'value'))
                csv_writer.writerows(generate_detail_rows(debits))
                csv_writer.writerow(())
            if credits_show and len(credits) > 0:
                csv_writer.writerow(
                    ('CREDITS', '', '', '', 'description', 'value'))
                csv_writer.writerows(generate_detail_rows(credits))
                csv_writer.writerow(())

        # no save needed, we're just reading..
        gnucash_session.end()
    except:
        if "gnucash_session" in locals():
            gnucash_session.end()

        raise
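The comment block in the split loop justifies using bisect_right on the sorted period start dates to find the bucket a transaction belongs to. A minimal sketch of that lookup on its own:

from bisect import bisect_right
from datetime import date

period_starts = [date(2010, 1, 1), date(2010, 2, 1), date(2010, 3, 1)]

def period_index_for(trans_date):
    # index of the last period whose start is on or before trans_date,
    # or -1 if the date falls before the first period
    return bisect_right(period_starts, trans_date) - 1

print(period_index_for(date(2010, 2, 15)))   # 1
print(period_index_for(date(2009, 12, 31)))  # -1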
コード例 #56
0
import math

print(math.sqrt(25))
print(math.pi)
print(math.degrees(2))
print(math.radians(60))
print(math.sin(2))
print(math.cos(0.5))
print(math.factorial(4))

import random

# printing random integer between 0 and 5
print(random.randint(0, 5))

# random number between 0 and 100
print(random.random() * 100)

List = [1, 4, True, 800, "python", 27, "hello"]

print(random.choice(List))

import datetime
from datetime import date
import time

print(time.time())

print(date.fromtimestamp(454554))
コード例 #57
0
import pandas as pd
from datetime import date
import numpy as np

dow_df = pd.read_csv('./stock_yahoo.csv')
dow_df.index = range(1, len(dow_df) + 1)

axp_df = pd.read_csv('./axp_stock.csv')
axp_df.index = range(1, len(axp_df) + 1)

mindex = []

for item in axp_df['date']:
    day = date.fromtimestamp(item)
    day = date.strftime(day, '%Y-%m-%d')
    mindex.append(day)

axp_df.index = mindex

# Drop the original 'date' column
axp_df_new = axp_df.drop(['date'], axis=1)

status = np.sign(np.diff(axp_df.close))

# Read the following shell history in reverse order; tushare is a Chinese financial-data platform
# 1    letv.shape
# 3    letv = ts.get_hist_data('300104', start='2017-06-06', end='2018-02-05')
# 4    type(letv)
# 5    type(letv, start='2017-06-06', end='2018-02-05')
# 6    letv.info()
# 7    letv = ts.get_hist_data('300104')
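The per-row fromtimestamp/strftime loop above can also be written with pandas' own timestamp handling. A minimal sketch of the same index construction, assuming the 'date' column holds Unix timestamps in seconds; note that pd.to_datetime interprets the values as UTC, while date.fromtimestamp uses local time, so dates near midnight may differ:

import pandas as pd

axp_df = pd.read_csv('./axp_stock.csv')
# vectorised equivalent of the fromtimestamp/strftime loop
axp_df.index = pd.to_datetime(axp_df['date'], unit='s').dt.strftime('%Y-%m-%d')
axp_df_new = axp_df.drop(['date'], axis=1)
print(axp_df_new.head())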
コード例 #58
0
# You can create a date object containing the current date by using a
# classmethod named today()

# Get date from a timestamp

# We can also create date objects from a timestamp. A Unix timestamp is
# the number of seconds between a particular date and January 1, 1970 at UTC.
# You can convert a timestamp to a date using the fromtimestamp() method.

# Why is it always 1st January 1970? Because '1st January 1970', usually
# called the "epoch date", is the date when time started for Unix
# computers, and that timestamp is marked as '0'. Any time since that
# date is calculated as the number of seconds elapsed since the epoch.

from datetime import date
timestamp = date.fromtimestamp(3243546575)
print('Date =', timestamp)
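
# Going the other way: a date object can be turned back into a Unix timestamp
# with time.mktime(). This is an illustrative addition; the value depends on the
# local timezone and corresponds to midnight of that date, so it will not match
# the original seconds exactly.

import time
print('Back to timestamp =', time.mktime(timestamp.timetuple()))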

# Print today's year, month and day

# We can get year, month, day, day of the week etc. from the date object easily

from datetime import date
today = date.today()
print('current year', today.year)
print('current month', today.month)
print('current day', today.day)
print()
print()

# datetime.time
コード例 #59
0
    def get_historical(self, days=30):
        influx_payload = []
        start_date = date.today() - timedelta(days=days)
        params = {'cmd': 'get_history', 'grouping': 1, 'length': 1000000}
        req = self.session.prepare_request(
            Request('GET', self.server.url + self.endpoint, params=params))
        g = connection_handler(self.session, req, self.server.verify_ssl)

        if not g:
            return

        get = g['response']['data']['data']

        params = {'cmd': 'get_stream_data', 'row_id': 0}
        sessions = []
        for history_item in get:
            if not history_item['id']:
                self.logger.debug('Skipping entry with no ID. (%s)',
                                  history_item['full_title'])
                continue
            if date.fromtimestamp(history_item['started']) < start_date:
                continue
            params['row_id'] = history_item['id']
            req = self.session.prepare_request(
                Request('GET', self.server.url + self.endpoint, params=params))
            g = connection_handler(self.session, req, self.server.verify_ssl)
            if not g:
                self.logger.debug(
                    'Could not get historical stream data for %s. Skipping.',
                    history_item['full_title'])
                continue
            try:
                self.logger.debug('Adding %s to history',
                                  history_item['full_title'])
                history_item.update(g['response']['data'])
                sessions.append(TautulliStream(**history_item))
            except TypeError as e:
                self.logger.error(
                    'TypeError has occurred : %s while creating TautulliStream structure',
                    e)
                continue

        for session in sessions:
            try:
                geodata = self.geoiphandler.lookup(session.ip_address)
            except (ValueError, AddressNotFoundError):
                self.logger.debug('Public IP missing for Tautulli session...')
                if not self.my_ip:
                    # Try the fallback ip in the config file
                    try:
                        self.logger.debug(
                            'Attempting to use the fallback IP...')
                        geodata = self.geoiphandler.lookup(
                            self.server.fallback_ip)
                    except AddressNotFoundError as e:
                        self.logger.error('%s', e)

                        self.my_ip = self.session.get(
                            'http://ip.42.pl/raw').text
                        self.logger.debug(
                            'Looked up the public IP and set it to %s',
                            self.my_ip)

                        geodata = self.geoiphandler.lookup(self.my_ip)

                else:
                    geodata = self.geoiphandler.lookup(self.my_ip)

            if not all([geodata.location.latitude, geodata.location.longitude
                        ]):
                latitude = 37.234332396
                longitude = -115.80666344
            else:
                latitude = geodata.location.latitude
                longitude = geodata.location.longitude

            if not geodata.city.name:
                location = '👽'
            else:
                location = geodata.city.name

            decision = session.transcode_decision
            if decision == 'copy':
                decision = 'direct stream'

            video_decision = session.stream_video_decision
            if video_decision == 'copy':
                video_decision = 'direct stream'
            elif video_decision == '':
                video_decision = 'Music'

            quality = session.stream_video_resolution
            if not quality:
                quality = session.container.upper()
            elif quality in ('SD', 'sd', '4k'):
                quality = session.stream_video_resolution.upper()
            elif session.stream_video_full_resolution:
                quality = session.stream_video_full_resolution
            else:
                quality = session.stream_video_resolution + 'p'

            # Platform Overrides
            platform_name = session.platform
            if platform_name in 'osx':
                platform_name = 'Plex Mac OS'
            if platform_name in 'windows':
                platform_name = 'Plex Windows'

            player_state = 100

            hash_id = hashit(
                f'{session.id}{session.session_key}{session.user}{session.full_title}'
            )
            influx_payload.append({
                "measurement":
                "Tautulli",
                "tags": {
                    "type": "Session",
                    "session_id": session.session_id,
                    "friendly_name": session.friendly_name,
                    "username": session.user,
                    "title": session.full_title,
                    "product": session.product,
                    "platform": platform_name,
                    "quality": quality,
                    "video_decision": video_decision.title(),
                    "transcode_decision": decision.title(),
                    "transcode_hw_decoding": session.transcode_hw_decoding,
                    "transcode_hw_encoding": session.transcode_hw_encoding,
                    "media_type": session.media_type.title(),
                    "audio_codec": session.audio_codec.upper(),
                    "stream_audio_codec": session.stream_audio_codec.upper(),
                    "quality_profile": session.quality_profile,
                    "progress_percent": session.progress_percent,
                    "region_code": geodata.subdivisions.most_specific.iso_code,
                    "location": location,
                    "full_location":
                    f'{geodata.subdivisions.most_specific.name} - {geodata.city.name}',
                    "latitude": latitude,
                    "longitude": longitude,
                    "player_state": player_state,
                    "device_type": platform_name,
                    "relayed": session.relayed,
                    "secure": session.secure,
                    "server": self.server.id
                },
                "time":
                datetime.fromtimestamp(
                    session.stopped).astimezone().isoformat(),
                "fields": {
                    "hash": hash_id
                }
            })
            try:
                self.dbmanager.write_points(influx_payload)
            except InfluxDBClientError as e:
                if "beyond retention policy" in str(e):
                    self.logger.debug(
                        'Only imported 30 days of data per retention policy')
                else:
                    self.logger.error(
                        'Something went wrong... post this output in discord: %s',
                        e)
コード例 #60
0
def getStartTime(daysAgo, startAtMidnight):
    currentTime = time.time()
    return time.mktime((date.fromtimestamp(currentTime) - timedelta(days=daysAgo)).timetuple()) if startAtMidnight else currentTime - daysAgo * 24 * 3600
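For clarity, a brief usage sketch (this assumes the module already imports time and, from datetime, date and timedelta, which the function body requires): with startAtMidnight=True the result is local midnight daysAgo days back, otherwise it is exactly daysAgo * 24 hours before now.

week_ago_midnight = getStartTime(7, True)   # local midnight, seven days ago
week_ago_exact = getStartTime(7, False)     # exactly 7 * 24 hours ago
print(date.fromtimestamp(week_ago_midnight), date.fromtimestamp(week_ago_exact))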