def seed(self, a=None):
    """Initialize internal state from hashable object.

    None or no argument seeds from current time or from an operating
    system specific randomness source if available.

    If a is neither None nor an int or long, hash(a) is used instead.

    If a is an int or long, a is used directly.  Distinct values between
    0 and 27814431486575L inclusive are guaranteed to yield distinct
    internal states (this guarantee is specific to the default
    Wichmann-Hill generator).
    """
    if a is None:
        try:
            # Prefer an OS-specific randomness source when available.
            a = long(_hexlify(_urandom(16)), 16)
        except NotImplementedError:
            import time
            a = long(time.time() * 256)  # use fractional seconds

    if not isinstance(a, (int, long)):
        a = hash(a)

    a, x = divmod(a, 30268)
    a, y = divmod(a, 30306)
    a, z = divmod(a, 30322)
    self._seed = int(x) + 1, int(y) + 1, int(z) + 1

    self.gauss_next = None
def format_time(seconds):
    minutes, seconds = divmod(seconds, 60)
    if minutes >= 60:  # was `> 60`, which mis-rendered durations of exactly 60-119 minutes
        hours, minutes = divmod(minutes, 60)
        return "%02d:%02d:%02d" % (hours, minutes, seconds)
    else:
        return "%02d:%02d" % (minutes, seconds)
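# A minimal usage sketch for format_time above (assumes integer-second input):
assert format_time(75) == "01:15"
assert format_time(3600) == "01:00:00"
assert format_time(3725) == "01:02:05"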
def tout(t):
    # Overdone time formatter to put all appropriate zeros in place
    t = int(round(t))
    hh, rem = divmod(t, 3600)
    mm, ss = divmod(rem, 60)
    if t < 10:
        ls = ["0:00:0", str(t)]
    elif t < 60:
        ls = ["0:00:", str(t)]
    elif t < 3600:
        if mm < 10 and ss < 10:
            ls = ["0:0", str(mm), ":0", str(ss)]
        elif mm < 10:
            ls = ["0:0", str(mm), ":", str(ss)]
        elif ss < 10:
            ls = ["0:", str(mm), ":0", str(ss)]
        else:
            ls = ["0:", str(mm), ":", str(ss)]
    else:
        if mm < 10 and ss < 10:
            ls = [str(hh), ":0", str(mm), ":0", str(ss)]
        elif mm < 10:
            ls = [str(hh), ":0", str(mm), ":", str(ss)]
        elif ss < 10:
            ls = [str(hh), ":", str(mm), ":0", str(ss)]
        else:
            ls = [str(hh), ":", str(mm), ":", str(ss)]
    return "".join(ls)
def __divmod__(self, other):
    if self.__is_negative() and other.__is_negative():
        q, r = divmod(-self, -other)
        return q, -r
    elif self.__is_negative():
        q, r = divmod(-self, other)
        q, r = -q, -r
        if r != zero:
            # Adjust toward floored division: the remainder takes other's sign.
            r += other
            q -= one
        return q, r
    elif other.__is_negative():
        q, r = divmod(self, -other)
        q = -q
        if r != zero:
            r += other
            q -= one
        return q, r
    else:
        # neither self nor other is negative
        s1 = self
        b = one
        q, r = zero, zero
        while s1 >= b:
            b <<= one
        while b > one:
            b >>= one
            q, r = q << one, r << one
            if s1 >= b:
                s1 -= b
                r += one
                if r >= other:
                    r -= other
                    q += one
        return (q, r)
def timeago(secs, precision=0):
    """
    :param int secs: number of seconds "ago".
    :param int precision: optional decimal precision of returned seconds.

    Pass a duration of time and return human readable shorthand, e.g.::

        >>> timeago(126.32)
        ' 2m 6s'
        >>> timeago(70.9999, 2)
        ' 1m 10.99s'
    """
    # split by years, weeks, days, hours, mins, secs
    years = weeks = days = mins = hours = 0
    mins, secs = divmod(secs, 60)
    hours, mins = divmod(mins, 60)
    days, hours = divmod(hours, 24)
    weeks, days = divmod(days, 7)
    years, weeks = divmod(weeks, 52)
    ((num1, num2), (label1, label2)) = (
        ((years, weeks), (u'y', u'w')) if years >= 1.0 else
        ((weeks, days), (u'w', u'd')) if weeks >= 1.0 else
        ((days, hours), (u'd', u'h')) if days >= 1.0 else
        ((hours, mins), (u'h', u'm')) if hours >= 1.0 else
        ((mins, secs), (u'm', u's')))
    return (u'%2d%s%2.*f%s' % (num1, label1, precision, num2, label2,))
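# A hedged check of the two-unit shorthand above:
print(timeago(126.32))             # -> ' 2m 6s'
print(timeago(3 * 86400 + 7200))   # -> ' 3d 2h'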
def after_run(self, run_context, run_values):
    results, step = run_values.results
    self._iter_count = step

    if not results:
        return
    self._timer.update_last_triggered_step(self._iter_count - 1)

    if self._model.steps_in_epoch is None:
        deco_print("Global step {}:".format(step), end=" ")
    else:
        deco_print(
            "Epoch {}, global step {}:".format(
                step // self._model.steps_in_epoch, step),
            end=" ",
        )

    loss = results[0]
    deco_print("loss = {:.4f}".format(loss), start="", end=", ")

    tm = (time.time() - self._last_time) / self._every_steps
    m, s = divmod(tm, 60)
    h, m = divmod(m, 60)

    deco_print(
        "time per step = {}:{:02}:{:.3f}".format(int(h), int(m), s),
        start="",
    )
    self._last_time = time.time()
def add_automatic_comment(self):
    if self.fixed is True:
        text = (
            "This %s has been scheduled for fixed downtime from %s to %s. "
            "Notifications for the %s will not be sent out during that time period." % (
                self.ref.my_type,
                time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self.start_time)),
                time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self.end_time)),
                self.ref.my_type)
        )
    else:
        hours, remainder = divmod(self.duration, 3600)
        minutes, seconds = divmod(remainder, 60)
        text = (
            "This %s has been scheduled for flexible downtime starting between %s and %s "
            "and lasting for a period of %d hours and %d minutes. "
            "Notifications for the %s will not be sent out during that time period." % (
                self.ref.my_type,
                time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self.start_time)),
                time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self.end_time)),
                hours, minutes, self.ref.my_type)
        )
    if self.ref.my_type == 'host':
        comment_type = 1
    else:
        comment_type = 2
    c = Comment(self.ref, False, "(Nagios Process)", text, comment_type, 2, 0, False, 0)
    self.comment_id = c.id
    self.extra_comment = c
    self.ref.add_comment(c)
def format_duration(seconds):
    sec_format = '%05.2f'
    minutes, seconds = divmod(seconds, 60)
    if minutes:
        sec_format = '%04.1f'
    hours, minutes = divmod(minutes, 60)
    if hours:
        sec_format = '%02d'
    days, hours = divmod(hours, 24)
    weeks, days = divmod(days, 7)
    # Index of the largest non-zero unit: 0=weeks, 1=days, 2=hours, 3=minutes, 4=seconds.
    greatest_unit = (
        not weeks, not days, not hours, not minutes, not seconds, False
    ).index(False)
    rv = ''
    if weeks:
        rv += '%dW' % weeks
    if days:
        rv += '%dD' % days
    if rv:
        rv += ' '
    if greatest_unit <= 2:
        rv += '%02d:' % hours
    if greatest_unit <= 3:
        rv += '%02d:' % minutes
    rv += sec_format % seconds
    return rv
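# Rough examples of format_duration's output (assuming a numeric seconds input):
print(format_duration(42.5))    # -> '42.50'
print(format_duration(3661))    # -> '01:01:01'
print(format_duration(90061))   # -> '1D 01:01:01'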
def timeago(secs, precision=0):
    """
    Pass float or int in seconds, and return a string of 0d 0h 0s format,
    showing only the two most significant units, e.g.:
    timeago(126.32) returns '2m6s',
    timeago(10.9999, 2) returns '10.99s'
    """
    # split by days, mins, hours, secs
    years, weeks, days, mins, hours = 0, 0, 0, 0, 0
    mins, secs = divmod(secs, 60)
    hours, mins = divmod(mins, 60)
    days, hours = divmod(hours, 24)
    weeks, days = divmod(days, 7)
    years, weeks = divmod(weeks, 52)
    years, weeks, days, hours, mins = (
        int(years), int(weeks), int(days), int(hours), int(mins))
    # return printable string
    if years > 0:
        return '%3s%-3s' % (str(years) + 'y', str(weeks) + 'w',)
    if weeks > 0:
        return '%3s%-3s' % (str(weeks) + 'w', str(days) + 'd',)
    if days > 0:
        return '%3s%-3s' % (str(days) + 'd', str(hours) + 'h',)
    elif hours > 0:
        return '%3s%-3s' % (str(hours) + 'h', str(mins) + 'm',)
    elif mins > 0:
        return '%3s%-3s' % (str(mins) + 'm', str(int(secs)) + 's',)
    else:
        fmt = '%.' + str(precision) + 'f s'
        return fmt % secs
def seed(self, a=None):
    """Initialize internal state from hashable object.

    None or no argument seeds from current time.

    If a is neither None nor an int or long, hash(a) is used instead.

    If a is an int or long, a is used directly.  Distinct values between
    0 and 27814431486575L inclusive are guaranteed to yield distinct
    internal states (this guarantee is specific to the default
    Wichmann-Hill generator).
    """
    if a is None:
        # Initialize from current time
        import time
        a = long(time.time() * 256)

    if type(a) not in (type(3), type(3L)):
        a = hash(a)

    a, x = divmod(a, 30268)
    a, y = divmod(a, 30306)
    a, z = divmod(a, 30322)
    self._seed = int(x) + 1, int(y) + 1, int(z) + 1

    self.gauss_next = None
def timedelta(pub_date):
    delta = datetime.now(tz=timezone.utc) - pub_date
    secs = delta.total_seconds()
    days, remainder = divmod(secs, 86400)
    hours, remainder = divmod(remainder, 3600)
    minutes, seconds = divmod(remainder, 60)

    days_str = ''
    if days != 0:
        days_str = '{:0.0f} day'.format(days)
        if days > 1:
            days_str += 's'
        days_str += ', '

    hours_str = ''
    if hours != 0:
        hours_str = '{:0.0f} hour'.format(hours)
        if hours > 1:
            hours_str += 's'
        hours_str += ', '

    minutes_str = ''
    if minutes != 0:
        minutes_str = '{:0.0f} minute'.format(minutes)
        if minutes > 1:
            minutes_str += 's'
        minutes_str += ', '
    else:
        minutes_str = 'seconds'

    delta_str = '{}{}{}'.format(days_str, hours_str, minutes_str)
    return delta_str.strip(', ') + ' ago'
def live_index(args):
    """Index pre-loaded EAD records into live indexes."""
    global session, db
    lgr = session.logger
    lgr.log_info(session, "Indexing pre-loaded records into live indexes...")
    start = time.time()
    # Get the Indexes
    if not db.indexes:
        db._cacheIndexes(session)
    if args.step == 'index':
        # Clear the existing Indexes
        for idx in db.indexes.itervalues():
            if not idx.get_setting(session, 'noIndexDefault', 0):
                idx.clear(session)
    # Run indexing
    _index(session, db, args)
    if args.test:
        test_expectedResults(args)
    # Log completed message
    (mins, secs) = divmod(time.time() - start, 60)
    (hours, mins) = divmod(mins, 60)
    lgr.log_info(
        session,
        'Live indexing complete ({0:.0f}h {1:.0f}m {2:.0f}s)'
        ''.format(hours, mins, secs)
    )
    return 0
def clusters(args):
    """Load and index subject clusters."""
    global session, db
    lgr = session.logger
    lgr.log_info(session, 'Accumulating subject clusters...')
    start = time.time()
    recordStore = db.get_object(session, 'recordStore')
    clusDocFac = db.get_object(session, 'clusterDocumentFactory')
    for rec in recordStore:
        clusDocFac.load(session, rec)
    session.database = '{0}_cluster'.format(session.database)
    clusDb = server.get_object(session, session.database)
    # Remove existing live index
    clusDb.clear_indexes(session)
    clusFlow = clusDb.get_object(session, 'buildClusterWorkflow')
    clusFlow.process(session, clusDocFac)
    (mins, secs) = divmod(time.time() - start, 60)
    (hours, mins) = divmod(mins, 60)
    lgr.log_info(
        session,
        'Subject Clustering complete ({0:.0f}h {1:.0f}m {2:.0f}s)'
        ''.format(hours, mins, secs)
    )
    # return session.database to the default (finding aid) DB
    session.database = db.id
    return 0
def print_time_diff_nosuffixes(diff):
    if diff is None:
        return '?'
    hours, rem = divmod(diff, 3600)
    minutes, seconds = divmod(rem, 60)
    return '{:02d}:{:02d}:{:02d}'.format(int(hours), int(minutes), int(seconds))
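# Quick sanity check (assumes diff is a number of seconds):
assert print_time_diff_nosuffixes(3725) == '01:02:05'
assert print_time_diff_nosuffixes(None) == '?'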
def _mods(i, mod):
    (q, r) = divmod(i, mod)
    while True:
        yield r
        if not q:
            break
        (q, r) = divmod(q, mod)
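# _mods yields the base-`mod` digits of i, least significant first;
# a small illustrative check:
assert list(_mods(255, 16)) == [15, 15]
assert list(_mods(0, 10)) == [0]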
def req():
    # Get URLs from a text file, remove white space.
    db = MySQLDatabase(DATABASE_HOST, DATABASE_USER, DATABASE_PASSWORD, DATABASE_NAME)
    db_worker_view = db.get_work_view()
    articles = db_worker_view.retrieve_all_articles()
    #articles = db_worker_view.retrieve_all_articles_questionmark()

    # measure time
    start = time.clock()
    start_time_iteration = start
    iteration_number = 483
    for i, article in enumerate(articles):
        # print some progress
        if i % 10000 == 0:
            # print time for the iteration
            seconds = time.clock() - start_time_iteration
            m, s = divmod(seconds, 60)
            h, m = divmod(m, 60)
            print "Number of crawled articles: %d. Total time for last iteration of 10000 articles: %d:%02d:%02d" % (i, h, m, s)
            start_time_iteration = time.clock()
            iteration_number += 1

        # Thread pool.
        # Blocks other threads (more than the set limit).
        pool.acquire(blocking=True)
        # Create a new thread.
        # Pass each URL (i.e. u parameter) to the worker function.
        t = threading.Thread(target=worker,
                             args=(MEDIAWIKI_API_ENDPOINT + urllib.quote(article['title']) + '/' + str(article['rev_id']),
                                   article, iteration_number))
        # Start the newly created thread.
        t.start()

    seconds = time.clock() - start
    m, s = divmod(seconds, 60)
    h, m = divmod(m, 60)
    print "Total time: %d:%02d:%02d" % (h, m, s)
def format_number(number, unit=None, units=None):
    plural = (abs(number) > 1)
    if number >= 10000:
        pow10 = 0
        x = number
        while x >= 10:
            x, r = divmod(x, 10)
            pow10 += 1
            if r:
                break
        if not r and x == 1:  # only an exact power of ten collapses to 10^n (was `if not r`, which mislabeled e.g. 20000)
            number = '10^%s' % pow10
    if isinstance(number, int) and number > 8192:
        pow2 = 0
        x = number
        while x >= 2:
            x, r = divmod(x, 2)
            pow2 += 1
            if r:
                break
        if not r:
            number = '2^%s' % pow2
    if not unit:
        return str(number)
    if plural:
        if not units:
            units = unit + 's'
        return '%s %s' % (number, units)
    else:
        return '%s %s' % (number, unit)
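# Illustrative calls (with the power-of-ten check above, only exact powers collapse):
print(format_number(10000))        # -> '10^4'
print(format_number(20000))        # -> '20000'
print(format_number(3, 'apple'))   # -> '3 apples'
print(format_number(1, 'apple'))   # -> '1 apple'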
def elemop(N=1000):
    r'''
    (Takes about 40ms on a first-generation Macbook Pro)
    '''
    for i in range(N):
        assert a + b == 579
        assert a - b == -333
        assert b * a == a * b == 56088
        assert b % a == 87
        assert divmod(a, b) == (0, 123)
        assert divmod(b, a) == (3, 87)
        assert -a == -123
        assert pow(a, 10) == 792594609605189126649
        assert pow(a, 7, b) == 99
        assert cmp(a, b) == -1
        assert '7' in str(c)
        assert '0' not in str(c)
        assert a.sqrt() == 11
        assert _g.lcm(a, b) == 18696
        assert _g.fac(7) == 5040
        assert _g.fib(17) == 1597
        assert _g.divm(b, a, 20) == 12
        assert _g.divm(4, 8, 20) == 3
        assert _g.mpz(20) == 20
        assert _g.mpz(8) == 8
        assert _g.mpz(4) == 4
        assert a.invert(100) == 87
def jd_to(jd):
    ordinal = int(jd) - 1721425
    if 0 < ordinal < 3652060:  # > 4x faster
        # datetime(9999, 12, 31).toordinal() == 3652059
        dt = datetime.fromordinal(ordinal)
        return (dt.year, dt.month, dt.day)
    ##wjd = floor(jd - 0.5) + 0.5
    qc, dqc = divmod(jd - epoch, 146097)  # qc ~~ quadricent
    cent, dcent = divmod(dqc, 36524)
    quad, dquad = divmod(dcent, 1461)
    yindex = dquad // 365  # divmod(dquad, 365)[0]
    year = qc*400 + cent*100 + quad*4 + yindex + (cent != 4 and yindex != 4)
    yearday = jd - to_jd(year, 1, 1)
    # Python 2.x and 3.x:
    if jd < to_jd(year, 3, 1):
        leapadj = 0
    elif isLeap(year):
        leapadj = 1
    else:
        leapadj = 2
    # Python >= 2.5:
    #leapadj = 0 if jd < to_jd(year, 3, 1) else (1 if isLeap(year) else 2)
    month = ((yearday + leapadj) * 12 + 373) // 367
    day = jd - to_jd(year, month, 1) + 1
    return int(year), int(month), int(day)
def printTotalTime(self):
    hours, rem = divmod(self.time_list[-1], 3600)
    minutes, seconds = divmod(rem, 60)
    if self.is_started:
        sct.printv('Remaining time: {:0>2}:{:0>2}:{:05.2f}'.format(int(hours), int(minutes), seconds))
    else:
        sct.printv('Total time: {:0>2}:{:0>2}:{:05.2f}'.format(int(hours), int(minutes), seconds))
def run(self, workspace):
    '''Run one image set'''
    m = workspace.measurements
    well_count, site_index = divmod(m.image_set_number - 1, self.site_count.value)
    if self.order == O_ROW:
        row_count, column_index = divmod(well_count, self.column_count.value)
        plate_index, row_index = divmod(row_count, self.row_count.value)
    else:
        column_count, row_index = divmod(well_count, self.row_count.value)
        plate_index, column_index = divmod(column_count, self.column_count.value)
    row_text_indexes = [
        x % 26 for x in reversed(
            [int(row_index / (26 ** i)) for i in range(self.row_digits)])]
    row_text = ['ABCDEFGHIJKLMNOPQRSTUVWXYZ'[x] for x in row_text_indexes]
    row_text = reduce(lambda x, y: x + y, row_text)
    well_template = "%s%0" + str(self.column_digits) + "d"
    well = well_template % (row_text, column_index + 1)
    statistics = [(cpmeas.M_SITE, site_index + 1),
                  (cpmeas.M_ROW, row_text),
                  (cpmeas.M_COLUMN, column_index + 1),
                  (cpmeas.M_WELL, well),
                  (cpmeas.M_PLATE, plate_index + 1)]
    for feature, value in statistics:
        m.add_image_measurement(feature, value)
    workspace.display_data.col_labels = ("Metadata", "Value")
    workspace.display_data.statistics = [
        (feature, str(value)) for feature, value in statistics]
def durationHuman(seconds):
    """ Turn number of seconds into human readable string """
    seconds = long(round(seconds))
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    years, days = divmod(days, 365.242199)

    sdays = str(days)
    syears = str(years)
    sseconds = str(seconds).rjust(2, '0')
    sminutes = str(minutes).rjust(2, '0')
    shours = str(hours).rjust(2, '0')

    duration = []
    if years > 0:
        duration.append('{} year'.format(syears) + 's' * (years != 1) + ' ')
    else:
        if days > 0:
            duration.append('{} day'.format(days) + 's' * (days != 1) + ' ')
        if hours > 0:
            duration.append('{}:'.format(shours))
        if minutes >= 0:
            duration.append('{}:'.format(sminutes))
        if seconds >= 0:
            duration.append('{}'.format(sseconds))
    return ''.join(duration)
def durationHuman(seconds):
    seconds = long(round(seconds))
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    years, days = divmod(days, 365.242199)

    sdays = str(days)
    syears = str(years)
    sseconds = str(seconds).rjust(2, '0')
    sminutes = str(minutes).rjust(2, '0')
    shours = str(hours).rjust(2, '0')

    duration = []
    if years > 0:
        duration.append('%s year' % syears + 's' * (years != 1) + ' ')
    else:
        if days > 0:
            duration.append('%s day' % sdays + 's' * (days != 1) + ' ')
        if hours > 0:
            duration.append('%s:' % shours)
        if minutes >= 0:
            duration.append('%s:' % sminutes)
        if seconds >= 0:
            duration.append('%s' % sseconds)
    return ''.join(duration)
def as_hms(value):
    """Given a floating-point number of seconds, translates it to an
    HH:MM:SS string."""
    long_seconds = int(value)
    (long_minutes, seconds) = divmod(long_seconds, 60)
    (hours, minutes) = divmod(long_minutes, 60)
    return "%d:%02d:%02d" % (hours, minutes, seconds)
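# Example conversion (fractional seconds are truncated by int()):
assert as_hms(3725.9) == '1:02:05'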
def _get_time_diff_formatted(old, recent):
    """
    Formats the difference between two datetime objects
    """
    diff = recent - old
    days = diff.days
    m, s = divmod(diff.seconds, 60)
    h, m = divmod(m, 60)
    return '{} days, {} hours, {} minutes, and {} seconds'.format(days, h, m, s)
def _fix(self):
    if abs(self.microseconds) > 999999:
        s = self.microseconds // abs(self.microseconds)
        div, mod = divmod(self.microseconds * s, 1000000)
        self.microseconds = mod * s
        self.seconds += div * s
    if abs(self.seconds) > 59:
        s = self.seconds // abs(self.seconds)
        div, mod = divmod(self.seconds * s, 60)
        self.seconds = mod * s
        self.minutes += div * s
    if abs(self.minutes) > 59:
        s = self.minutes // abs(self.minutes)
        div, mod = divmod(self.minutes * s, 60)
        self.minutes = mod * s
        self.hours += div * s
    if abs(self.hours) > 23:
        s = self.hours // abs(self.hours)
        div, mod = divmod(self.hours * s, 24)
        self.hours = mod * s
        self.days += div * s
    if abs(self.months) > 11:
        s = self.months // abs(self.months)
        div, mod = divmod(self.months * s, 12)
        self.months = mod * s
        self.years += div * s
    if (self.hours or self.minutes or self.seconds or self.microseconds
            or self.hour is not None or self.minute is not None
            or self.second is not None or self.microsecond is not None):
        self._has_time = 1
    else:
        self._has_time = 0
def _get_col_row(self, n):
    if self._direction == "column":
        col, row = divmod(n, self._nrows)
    else:
        row, col = divmod(n, self._ncols)
    return col, row
def test_time(self):
    t = datetime(1, 1, 1, 3, 30, 0)
    deltas = np.random.randint(1, 20, 3).cumsum()
    ts = np.array([(t + timedelta(minutes=int(x))).time() for x in deltas])
    df = DataFrame({'a': np.random.randn(len(ts)),
                    'b': np.random.randn(len(ts))},
                   index=ts)
    ax = df.plot()

    # verify tick labels
    ticks = ax.get_xticks()
    labels = ax.get_xticklabels()
    for t, l in zip(ticks, labels):
        m, s = divmod(int(t), 60)
        h, m = divmod(m, 60)
        xp = l.get_text()
        if len(xp) > 0:
            rs = time(h, m, s).strftime('%H:%M:%S')
            self.assertEqual(xp, rs)

    # change xlim
    ax.set_xlim('1:30', '5:00')

    # check tick labels again
    ticks = ax.get_xticks()
    labels = ax.get_xticklabels()
    for t, l in zip(ticks, labels):
        m, s = divmod(int(t), 60)
        h, m = divmod(m, 60)
        xp = l.get_text()
        if len(xp) > 0:
            rs = time(h, m, s).strftime('%H:%M:%S')
            self.assertEqual(xp, rs)
def draw_peers(state):
    window_height = state['y'] - 4
    win_peers = curses.newwin(window_height, 75, 3, 0)

    offset = state['peerinfo_offset']
    for index in xrange(offset, offset + window_height):
        if index < len(state['peerinfo']):
            peer = state['peerinfo'][index]
            condition = (index == offset + window_height - 1) and (index + 1 < len(state['peerinfo']))
            condition = condition or ((index == offset) and (index > 0))
            if condition:
                # scrolling up or down is possible
                win_peers.addstr(index - offset, 3, "...")
            else:
                if peer['inbound']:
                    win_peers.addstr(index - offset, 1, 'I')
                elif 'syncnode' in peer:
                    if peer['syncnode']:
                        # syncnodes are outgoing only
                        win_peers.addstr(index - offset, 1, 'S')

                addr_str = peer['addr'].replace(".onion", "").replace(":" + g.node_port, "").replace(":" + g.node_port_test, "").strip("[").strip("]")
                # truncate long ip addresses (ipv6)
                addr_str = (addr_str[:17] + '...') if len(addr_str) > 20 else addr_str
                win_peers.addstr(index - offset, 3, addr_str)
                win_peers.addstr(index - offset, 24, peer['subver'].strip("/").replace("Satoshi:", "Sat")[:11])

                mbrecv = "% 7.1f" % (float(peer['bytesrecv']) / 1048576)
                mbsent = "% 7.1f" % (float(peer['bytessent']) / 1048576)
                win_peers.addstr(index - offset, 35, mbrecv + 'MB')
                win_peers.addstr(index - offset, 45, mbsent + 'MB')

                timedelta = int(time.time() - peer['conntime'])
                m, s = divmod(timedelta, 60)
                h, m = divmod(m, 60)
                d, h = divmod(h, 24)
                time_string = ""
                if d:
                    time_string += ("%d" % d + "d").rjust(3) + " "
                    time_string += "%02d" % h + ":"
                elif h:
                    time_string += "%02d" % h + ":"
                time_string += "%02d" % m + ":"
                time_string += "%02d" % s
                win_peers.addstr(index - offset, 55, time_string.rjust(12))

                if 'syncheight' in peer:
                    win_peers.addstr(index - offset, 69, str(peer['syncheight']).rjust(6))

    win_peers.refresh()
def check(self):
    #print("check")
    self.value -= 1
    if self.value == 0:
        #print("The timer has expired")
        self.bomb_text.write(" Boom!")
        self.sleepTimerelapsed.emit()
        self.countDown.stop()
        self.isActive = False
    elif self.value == 10:
        self.sleepTimertenseconds.emit()
    else:
        m, s = divmod(self.value, 60)
        h, m = divmod(m, 60)
        text = "%02d:%02d:%02d" % (h, m, s)
        #self.lbl_countdown.setText(text)
        self.bomb_text.write(text)
        self.blockValueSignal = True
        if self.forceSpinBoxWidget:
            self.sb_hours.setValue(h)
            self.sb_minutes.setValue(m)
        else:
            self.sb_minutes_value.write(m)
            self.sb_hours_value.write(h)
        self.blockValueSignal = False
def _to_list(a):
    c = []
    while a:
        a, r = divmod(a, 2)
        c.append(r)
    return c
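# _to_list returns the binary digits of a, least significant bit first:
assert _to_list(6) == [0, 1, 1]   # 6 == 0b110
assert _to_list(0) == []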
print((0, 1, 2) < (0, 3, 4))
print((0, 1, 2000000) < (0, 3, 4))

a = 10
b = 20
temp = a
a = b
b = temp
print(a, b)

a, b = b, a
print(a, b)

#a, b = 1, 2, 3

addr = '*****@*****.**'
uname, domain = addr.split('@')
print(uname)
print(domain)

t = divmod(7, 3)
print(t)

quot, rem = divmod(7, 3)
print(quot, rem)

def min_max(t):
    return min(t), max(t)

def printall(*args):
    print(args)

printall(1, 2.0, '3')

t = (7, 3)
#divmod(t)
print(divmod(*t))

print(max(1, 2, 3))
#sum(1,2,3)
def receive_midi_cc(self, channel, cc_no, cc_value):
    if (channel == SCENE_CH):
        if (cc_no == SCENE_BASE_CC):
            self._scene_sel_count += 1
            if (self._scene_sel_count >= 4):
                index = list(self.song().scenes).index(self.song().view.selected_scene)
                if (cc_value < 64):
                    index += 1
                else:
                    index -= 1
                index = max(0, min(index, len(self.song().scenes) - 1))
                self.song().view.selected_scene = self.song().scenes[index]
                self._scene_sel_count = 0
        elif (cc_no == SCENE_BASE_CC + 1):
            self._track_sel_count += 1
            if (self._track_sel_count >= 4):
                tracks = list(self.song().visible_tracks)
                returns = self.song().return_tracks
                if (len(returns) > 0):
                    tracks.extend(returns)
                tracks.append(self.song().master_track)
                index = tracks.index(self.song().view.selected_track)
                if (cc_value < 64):
                    index += 1
                else:
                    index -= 1
                index = max(0, min(index, len(tracks) - 1))
                detail_clip = self.application().view.is_view_visible('Detail/Clip')
                self.song().view.selected_track = tracks[index]
                self.song().view.selected_track.view.select_instrument()
                if (detail_clip):
                    self.application().view.show_view('Detail/Clip')
                self._track_sel_count = 0
        elif (cc_no == SCENE_BASE_CC + 2):
            self._quant_sel_count += 1
            if (self._quant_sel_count >= 4):
                quant = self.song().clip_trigger_quantization
                index = list(quant.values).index(quant)
                if (cc_value < 64):
                    index += 1
                else:
                    index -= 1
                index = max(0, min(index, len(quant.values) - 1))
                self.song().clip_trigger_quantization = quant.values[index]
                self._quant_sel_count = 0
        elif (cc_no == SCENE_BASE_CC + 3):
            step = self.quantization_step()
            if (cc_value < 64):
                self.song().scrub_by(step)
            else:
                self.song().scrub_by(-step)
        elif (cc_no == SCENE_BASE_CC + 4):
            clip = self.song().view.detail_clip
            if (clip):
                step = self.quantization_step()
                if (cc_value < 64):
                    clip.move_playing_pos(step)
                else:
                    clip.move_playing_pos(-step)
        elif (cc_no == SCENE_BASE_CC + 5):
            clip = self.song().view.detail_clip
            step = self.quantization_step()
            if (clip):
                div, mod = divmod(clip.loop_start, step)
                if (cc_value < 64):
                    clip.loop_end += step - mod
                    clip.loop_start += step - mod
                elif (mod):
                    clip.loop_start -= mod
                    clip.loop_end -= mod
                elif (div > 0):
                    clip.loop_start -= step
                    clip.loop_end -= step
            else:
                div, mod = divmod(self.song().loop_start, step)
                if (cc_value < 64):
                    self.song().loop_start += step - mod
                elif (mod):
                    self.song().loop_start -= mod
                elif (div > 0):
                    self.song().loop_start -= step
        elif (cc_no == SCENE_BASE_CC + 6):
            clip = self.song().view.detail_clip
            step = self.quantization_step()
            if (clip):
                div, mod = divmod(clip.loop_end - clip.loop_start, step)
                if ((cc_value < 64) or (div < 1)):
                    clip.loop_end += step - mod
                elif (mod):
                    clip.loop_end -= mod
                elif (div > 1):
                    clip.loop_end -= step
            else:
                div, mod = divmod(self.song().loop_length, step)
                if ((cc_value < 64) or (div < 1)):
                    self.song().loop_length += step - mod
                elif (mod):
                    self.song().loop_length -= mod
                elif (div > 1):
                    self.song().loop_length -= step
        elif (cc_no == SCENE_BASE_CC + 7):
            clip = self.song().view.detail_clip
            if (clip):
                if (clip.is_audio_clip):
                    if ((cc_value < 64) and (clip.pitch_coarse < 48)):
                        clip.pitch_coarse += 1
                    elif (clip.pitch_coarse > -48):
                        clip.pitch_coarse -= 1
                else:
                    trans = 1
                    if (cc_value >= 64):
                        trans = -1
                    notes = clip.get_selected_notes()
                    if (len(notes) > 0):
                        notes = self.transpose_notes(notes, trans)
                        if (len(notes) > 0):
                            clip.replace_selected_notes(notes)
                    else:
                        notes = clip.select_all_notes()
                        notes = clip.get_selected_notes()
                        notes = self.transpose_notes(notes, trans)
                        if (len(notes) > 0):
                            clip.replace_selected_notes(notes)
                        clip.deselect_all_notes()
def validate_card(self, number):
    # Luhn check: double every second digit from the right and sum the digit pairs.
    r = [int(ch) for ch in str(number)][::-1]
    return (sum(r[0::2]) + sum(sum(divmod(d * 2, 10)) for d in r[1::2])) % 10 == 0
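# divmod(d * 2, 10) splits a doubled digit into its tens and ones digits,
# which is exactly the "sum the digits of 2*d" step of the Luhn algorithm;
# e.g. the widely used test number 4111111111111111 passes the check.
assert sum(divmod(7 * 2, 10)) == 1 + 4  # 14 -> 1 + 4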
def print_time(start, end):
    hours, rem = divmod(end - start, 3600)
    minutes, seconds = divmod(rem, 60)
    print("{:0>2}h {:0>2}m {:05.2f}s ".format(int(hours), int(minutes), seconds))
    avg_iou = np.mean(iou_list)
    avg_val_loss = np.mean(val_loss_list)
    print("\nAverage validation accuracy for epoch # %04d = %f" % (epoch, avg_score))
    print("Average per class validation accuracies for epoch # %04d:" % (epoch))
    for index, item in enumerate(class_avg_scores):
        print("%s = %f" % (class_names_list[index], item))
    print("Validation precision = ", avg_precision)
    print("Validation recall = ", avg_recall)
    print("Validation F1 score = ", avg_f1)
    print("Validation IoU score = ", avg_iou)
    print("Validation logloss: ", avg_val_loss)  # val loss

    epoch_time = time.time() - epoch_st
    remain_time = epoch_time * (args.num_epochs - 1 - epoch)
    m, s = divmod(remain_time, 60)
    h, m = divmod(m, 60)
    if s != 0:
        train_time = "Remaining training time = %d hours %d minutes %d seconds\n" % (h, m, s)
    else:
        train_time = "Remaining training time : Training completed.\n"
    utils.LOG(train_time)
    scores_list = []

    val_print = "Epoch = %d Count = %d Current_Loss = %.4f Time = %.2f" % (epoch, len(val_loss_list), avg_val_loss, epoch_time)
    val_log.write("%s \n" % val_print)  # save training log
    st = time.time()

train_log.close()
val_log.close()
def write(self, numpy_array):
    """
    Writes raw samples to the task or virtual channels you specify.

    The number of samples per channel to write is determined using the
    following equation:

        number_of_samples_per_channel = math.floor(
            numpy_array_size_in_bytes / (
                number_of_channels_to_write * raw_sample_size_in_bytes))

    Raw samples constitute the internal representation of samples in a
    device, read directly from the device or buffer without scaling or
    reordering. The native format of a device can be an 8-, 16-, or
    32-bit integer, signed or unsigned. If you use a different integer
    size than the native format of the device, one integer can contain
    multiple samples or one sample can stretch across multiple integers.
    For example, if you use 32-bit integers, but the device uses 8-bit
    samples, one integer contains up to four samples. If you use 8-bit
    integers, but the device uses 16-bit samples, a sample might require
    two integers. This behavior varies from device to device. Refer to
    your device documentation for more information.

    NI-DAQmx does not separate raw data into channels. It accepts data
    in an interleaved or non-interleaved 1D array, depending on the raw
    ordering of the device. Refer to your device documentation for more
    information.

    If the task uses on-demand timing, this method returns only after
    the device generates all samples. On-demand is the default timing
    type if you do not use the timing property on the task to configure
    a sample timing type. If the task uses any timing type other than
    on-demand, this method returns immediately and does not wait for the
    device to generate all samples. Your application must determine if
    the task is done to ensure that the device generated all samples.

    Use the "auto_start" property on the stream to specify if this
    method automatically starts the stream's owning task if you did not
    explicitly start it with the DAQmx Start Task method.

    Use the "timeout" property on the stream to specify the amount of
    time in seconds to wait for the method to write all samples.
    NI-DAQmx performs a timeout check only if the method must wait
    before it writes data. This method returns an error if the time
    elapses. The default timeout is 10 seconds. If you set timeout to
    nidaqmx.WAIT_INFINITELY, the method waits indefinitely. If you set
    timeout to 0, the method tries once to write the submitted samples.
    If the method could not write all the submitted samples, it returns
    an error and the number of samples successfully written.

    Args:
        numpy_array (numpy.ndarray): Specifies a 1D NumPy array that
            contains the raw samples to write to the task.
    Returns:
        int: Specifies the actual number of samples per channel
            successfully written to the buffer.
    """
    channels_to_write = self._task.channels
    number_of_channels = len(channels_to_write.channel_names)
    channels_to_write.ao_resolution_units = ResolutionType.BITS

    number_of_samples_per_channel, _ = divmod(
        numpy_array.nbytes,
        (number_of_channels * channels_to_write.ao_resolution / 8))

    return _write_raw(
        self._handle, number_of_samples_per_channel, self.auto_start,
        self.timeout, numpy_array)
def timer(start, end):
    hours, rem = divmod(end - start, 3600)
    minutes, seconds = divmod(rem, 60)
    print("Completion Time: {} Hours {} Minutes {} Seconds".format(int(hours), int(minutes), int(seconds)))
time = []  # note: shadows any imported `time` module
allTimes = []
for i in range(len(web)):
    time.append([])
    allTimes.append([])

for i in range(0, len(websiteList), 3):
    ap = 0
    for j in range(len(web)):
        if web[j] in websiteList[i]:
            print(websiteList[i+1], " visited: ", websiteList[i], "\ton: ", websiteList[i+2])
            time[j].append(websiteList[i+2])

for i in range(len(time)):
    pts = time[i]
    totalTime = 0
    for indx, data in enumerate(pts):
        if indx < (len(pts) - 1):
            elapsedTime = pts[indx+1] - data
            minVisited = divmod(elapsedTime.total_seconds(), 60)[0]
            if minVisited < 5:
                totalTime += minVisited
    allTimes[i] = totalTime

#sudo chmod a+w /etc/squid/ban_domains.txt
bansFD = open("/etc/squid/ban_domains.txt", "a")
for i in range(len(timeLimits)):
    if allTimes[i] > timeLimits[i]:
        print("time exceeded for: ", web[i])
        bansFD.write(ifBan[i] + '\n')
bansFD.close()
def _love_you_forever(self, start=0, status=1, indent=0) -> Union[int, Tuple[int, list], list]:
    jump = 0
    index = start  # Why is this needed? Because the program may never even enter the for loop below!
    result = []
    if status == 2:
        # Blocks whose contents are not parsed (notably `block:code`) need their blank lines recorded.
        more = 0
    for index, line in enumerate(self.lines[start:], start):
        # Skip-ahead
        if jump:
            jump -= 1
            continue  # Ignore lines that have already been parsed.
        line = line.rstrip()
        if not line:
            if status == 2:
                more += 1  # Record blank lines inside non-parsed blocks.
            continue  # Ignore blank lines.
        # Determine the indentation level
        pos = 0       # number of leading spaces
        back = False  # flag: hand the line back (dedent)?
        while pos < len(line) and line[pos] == ' ':
            pos += 1
        if status == 2:
            # For non-parsed blocks:
            pos_ = indent * 4
            if pos < pos_:
                # if the actual indent is less than the target indent, hand the line back;
                back = True
            else:
                # otherwise, spaces beyond the target indent are kept as-is.
                pos = pos_
        else:
            div, mod = divmod(pos, 4)
            if div < indent:
                # actual indent less than the target indent: hand the line back;
                back = True
            elif mod != 0 or div > indent:
                # actual indent greater than the target indent: *ignore the line*.
                continue
        # Handle wrong indentation
        if back:
            index -= 1
            if status == 0:
                return index          # comments don't need CONTENT (RESULT),
            else:
                return index, result  # everything else does.
        # 0: comment
        if status == 0:
            continue  # simply ignore comments.
        # Everything below is non-comment
        if pos:
            line = line[pos:]
        prefix = line[0]  # first character of the line, to speed up dispatch
        # 1: regular
        if status == 1:
            # List
            if prefix in '.0123456789?~x-:+v*':
                Axx = RE_QUICK_LIST.fullmatch(line)
                if Axx:
                    if Axx.group(1):
                        Qtype, Qstatus = 'u', 3
                    elif Axx.group(2):
                        Qtype, Qstatus = 'o', 4
                    elif Axx.group(3):
                        Qtype, Qstatus = 't', 5
                    Qindex, Qcontent = index, []
                    while Qindex < self.lines_length:
                        Qindex, Qresult = self._love_you_forever(Qindex, Qstatus, indent)
                        if Qresult:
                            Qcontent.append(Qresult)
                        else:
                            break
                    result.append([3, 'lst',
                                   {'typ': Qtype, 'sta': 1 if prefix == '?' else int(Axx.group(2))}
                                   if Qtype == 'o' else {'typ': Qtype},
                                   Qcontent])
                    jump = Qindex - index
                    continue
            # Header
            if prefix == '=':
                Axx = RE_QUICK_HEAD.fullmatch(line)
                if Axx:
                    Qtype, Qheader = Axx.groups()
                    Qtype = len(Qtype)  # its valid length is guaranteed by the regex
                    result.append([3, 'had', {'lev': Qtype}, Qheader])
                    # non-top-level headers are not added to headers
                    if not indent:
                        self.headers.append((Qtype, Qheader))
                    continue
            # Separator & page break
            elif prefix in "%-" and 3 <= len(line) == line.count(prefix):
                # % 100101
                # - 101101
                result.append([3, ['pgb', 'sep'][(ord(prefix) >> 3) & 1], None, None])
                continue
            # Anything else:
            Axx = RE_QUICK_ALLS.fullmatch(line)
            if Axx:
                Quick = Axx.group(2)
                # Config
                if prefix == '&' and indent == 0 and Quick:
                    Axx = RE_ALIAS.fullmatch(Quick)
                    if Axx:
                        Qmodule, Qalias = Axx.groups()
                        if Qmodule in self.ext_modules or Qmodule == 'RAINLotus':
                            if Qmodule != 'RAINLotus':
                                if Qalias and (Qmodule not in self.imports):
                                    # If a module is given several aliases, only the first one set should be used.
                                    self.imports[Qalias] = Qmodule
                                self.imported.add(Qmodule)
                            Qindex, Qresult = self._love_you_forever(index + 1, 7, indent + 1)
                            self.config[Qmodule].extend(Qresult)
                            jump = Qindex - index
                # General
                elif prefix == '/' and Quick:
                    Qidex = RE_IDEX.match(Quick)
                    if Qidex:
                        Qalias, Qmethod = Qidex.groups()
                        if Qalias:
                            # filter out all modules that were not imported
                            if Qalias in self.imports:
                                Qmodule = self.imports[Qalias]
                            elif Qalias in self.imported:
                                Qmodule = Qalias
                            else:
                                continue
                            try:
                                if not self.ext_modules[Qmodule][0][0](Qmethod):
                                    continue
                            except Exception:
                                continue
                            try:
                                Qstatus = 1 if self.ext_modules[Qmodule][1][0](Qmethod) else 2
                            except Exception:
                                Qstatus = 2
                        elif Qmethod in {'image', 'audio', 'video'}:
                            Qmodule = None
                            Qstatus = 1
                        else:
                            continue
                        Qargx = RE_ARGX.match(Quick, Qidex.end())
                        if Qmodule:
                            Qargs = ARGX2ARGS(Qargx.group(0))
                        else:
                            Qargs = ARGX2ARGS(Qargx.group(0), {
                                'image': lambda x: x in {'src', 'alt'},
                                'audio': lambda x: x in {'src', 'autoplay', 'loop', 'muted', 'preload'},
                                'video': lambda x: x in {'src', 'autoplay', 'loop', 'muted', 'preload'}
                            }[Qmethod])
                        Qsufx = RE_SUFX.fullmatch(Quick, Qargx.end())
                        Qtext = Qsufx.group(1) if Qsufx else None
                        Qindex, Qresult = self._love_you_forever(index + 1, Qstatus, indent + 1)
                        if Qalias:
                            result.append([5, (Qmodule, Qmethod), Qargs,
                                           self._combin(Qtext, Qresult, Qstatus == 2)])
                        else:
                            result.append([3, {
                                'image': 'img',
                                'audio': 'aud',
                                'video': 'vid'
                            }[Qmethod], Qargs, self._combin(Qtext, Qresult)])
                        jump = Qindex - index
                # Note
                elif prefix == '*' and Quick:
                    Quick = Quick.lower()
                    if Quick in ALLOWED_ARGS_NOTE:
                        Qindex, Qresult = self._love_you_forever(index + 1, 1, indent + 1)
                        result.append([3, 'not', {'typ': Quick}, Qresult])
                        jump = Qindex - index
                # Quote
                elif prefix == '"':
                    Qindex, Qresult = self._love_you_forever(index + 1, 1, indent + 1)
                    if Quick and Quick[:2] in {'--', '——'}:
                        result.append([3, 'quo', {'aut': self._mesilf(Quick[2:].strip(), True)}, Qresult])
                    else:
                        result.append([3, 'quo', {'aut': None}, self._combin(Quick, Qresult)])
                    jump = Qindex - index
                # Definition-list
                elif prefix == ':' and Quick:
                    Qindex, Qcontent = index, []
                    while Qindex < self.lines_length:
                        Qindex, Qresult = self._love_you_forever(Qindex, 6, indent)
                        if Qresult:
                            Qcontent.append(Qresult)
                        else:
                            break
                    result.append([3, 'lst', {'typ': 'd'}, Qcontent])
                    jump = Qindex - index
                # Table
                elif prefix == '|':
                    Qmode, Qheight, Qrotate = RE_TABLE.fullmatch(f' {Quick}').groups() if Quick else (None, None, None)
                    # Qmode:   table mode
                    # Qheight: table header height
                    # Qrotate: whether to rotate the table
                    Qindex, Qtable = self._love_you_forever(index + 1, 2, indent + 1)
                    Qtable = filter(bool, Qtable)
                    ### Parse the table ###
                    mod = Qmode.lower() if Qmode else 'quick'
                    hei = int(Qheight) if Qheight else 1
                    fai = False
                    # Normalize the data format
                    if mod == 'quick':
                        fresh = map(lambda x: x[1:], csv.reader(
                            Qtable, delimiter='|', escapechar='\\', quoting=csv.QUOTE_NONE))
                    elif mod == 'csv':
                        fresh = csv.reader(Qtable)
                    else:
                        try:
                            fresh = json.loads(
                                ''.join(Qtable),
                                parse_int=str, parse_float=str, parse_constant=str)
                        except json.JSONDecodeError:
                            fai = True
                        else:
                            if isinstance(fresh, dict):
                                try:
                                    head, align, body = fresh['head'], fresh['align'], fresh['body']
                                except KeyError:
                                    fai = True
                                else:
                                    if any(map(
                                            lambda x: not isinstance(x, list) or any(map(
                                                lambda y: any(map(
                                                    lambda z: not isinstance(z, str), y)), x)),
                                            (head, [align], body))):
                                        fai = True
                                    else:
                                        hei = len(head)
                                        fresh = []
                                        fresh.extend(head)
                                        fresh.append(align)
                                        fresh.extend(body)
                            elif isinstance(fresh, list):
                                if any(map(
                                        lambda x: not isinstance(x, list) or any(map(
                                            lambda y: any(map(
                                                lambda z: not isinstance(z, str), y)), x)),
                                        fresh)):
                                    fai = True
                            else:
                                fai = True
                    if fai:
                        jump = Qindex - index
                        continue
                    if mod != 'json':
                        fresh = tuple(map(lambda x: tuple(map(lambda y: y.strip(), x)), fresh))
                    # Find the table width, validate it, and read the alignment-control text
                    wid = 0
                    ava = []
                    ali = []
                    for i, row in enumerate(fresh):
                        # alignment-control row
                        if i == hei:
                            ava.append(False)
                            if not row:  # for json
                                ali = repeat('=')
                                continue
                            for j, sign in enumerate(row, 1):
                                if sign in {'<', '=', '>'}:
                                    ali.append(sign)
                                    if j == len(row):
                                        if 0 < wid != len(row):
                                            fai = True
                                            break
                                        else:
                                            wid = len(row)
                                elif j == len(row):
                                    if sign in {'<<<', '===', '>>>'}:
                                        ali = chain(ali, repeat(sign[0]))
                                    elif not sign:  # for quick
                                        ali = repeat('=')
                                    else:
                                        fai = True
                                        break
                                else:
                                    fai = True
                                    break
                            if fai:
                                break
                            continue
                        # blank row
                        if not row:
                            ava.append(False)
                            continue
                        # regular row
                        if row[-1] in {'<<<', '===', '>>>'}:
                            ava.append(len(row) != 1)
                        else:
                            ava.append(True)
                            if 0 < wid != len(row):
                                fai = True
                                break
                            else:
                                wid = len(row)
                    if fai or not ali or wid == 0:
                        jump = Qindex - index
                        continue
                    # Per-cell computation
                    table = []
                    for row in compress(fresh, ava):
                        span = 1
                        cache = None
                        rowing = []
                        for i, cell in enumerate(row, 1):
                            if cell in {'>', '>>>'}:
                                if not cache:
                                    fai = True
                                    break
                                elif cell == '>':
                                    span += 1
                                    continue
                                elif i == len(row):
                                    span += wid - i + 1
                                    break
                            if cache:
                                rowing.append([span, cache])
                                cache = self._mesilf(cell, True)
                                span = 1
                            else:
                                cache = self._mesilf(cell, True)
                        if fai:
                            break
                        if cache:
                            rowing.append([span, cache])
                        table.append(rowing)
                    if fai:
                        jump = Qindex - index
                        continue
                    ### This is what finally gets passed on ###
                    result.append([
                        3, 'tab',
                        {
                            'hei': hei,
                            'rot': bool(Qrotate),
                            'ali': list(islice(ali, wid))
                        },
                        table
                    ])
                    jump = Qindex - index
                # Collapse
                elif prefix == '~':
                    if Quick:
                        try:
                            Qopen, Qsummary = Quick.split(' ', 1)
                            if Qopen.lower() != 'open':
                                Qopen, Qsummary = False, self._mesilf(Quick, True)
                            else:
                                Qopen, Qsummary = True, self._mesilf(Qsummary, True)
                        except ValueError:
                            if Quick.lower() == 'open':
                                Qopen, Qsummary = True, None
                            else:
                                Qopen, Qsummary = False, self._mesilf(Quick, True)
                    else:
                        Qopen, Qsummary = False, None
                    # Qopen:    whether expanded by default
                    # Qsummary: the summary line
                    Qindex, Qresult = self._love_you_forever(index + 1, 1, indent + 1)
                    result.append([3, 'col', {'opn': Qopen, 'sum': Qsummary}, Qresult])
                    jump = Qindex - index
                # Dialog
                elif prefix == '@':
                    Qargx = RE_ARGX.fullmatch(f' {Quick}')
                    if Qargx:
                        Qargs = ARGX2ARGS(Qargx.group(0), lambda x: x in ALLOWED_ARGS_DIALOG)
                    else:
                        Qargs = {}
                    Qindex, Qresult = self._love_you_forever(index + 1, 10, indent + 1)
                    result.append([3, 'dia', Qargs, Qresult])
                    jump = Qindex - index
                # Footnote
                elif prefix == '>' and Quick:
                    Qindex, Qresult = self._love_you_forever(index + 1, 1, indent + 1)
                    result.append([3, 'fnt', {'fnt': Quick}, Qresult])
                    jump = Qindex - index
                # Code
                elif prefix == '`':
                    Qindex, Qresult = self._love_you_forever(index + 1, 2, indent + 1)
                    result.append([3, 'cod', {'lan': Quick.lower() if Quick else 'plaintext'}, Qresult])
                    jump = Qindex - index
                # Raw & Diagram & Formula
                elif prefix in '!#$':
                    # ! 100001
                    # # 100011
                    # $ 100100
                    prefix = ord(prefix) >> 1 & 3
                    Qindex, Qresult = self._love_you_forever(index + 1, 2, indent + 1)
                    result.append([3, ['raw', 'dgr', 'fml'][prefix], None,
                                   [Quick] + Qresult if Quick else Qresult])
                    jump = Qindex - index
                # Comment
                elif prefix == ';':
                    Qindex = self._love_you_forever(index + 1, 0, indent + 1)
                    jump = Qindex - index
                continue
        # 3/4/5: unordered / ordered / todo lists
        elif 3 <= status <= 5:
            Axx = RE_QUICK_LIST.fullmatch(line)
            if Axx:
                *ovo, text = Axx.groups()
                Qstatus = tuple(map(bool, ovo)).index(True) + 3
                if status == Qstatus:
                    Qindex, Qresult = self._love_you_forever(index + 1, 1, indent + 1)
                    Qcontent = self._combin(text, Qresult)
                    return Qindex + 1, (Qcontent if status != 5 else [Axx.group(3), Qcontent])
            return index - 1, None
        # 6: definition list
        elif status == 6:
            if prefix == ':':
                Axx = RE_QUICK_ALLS.fullmatch(line)
                if Axx:
                    Qdefinition = Axx.group(2)  # text after the list-item marker
                    if Qdefinition:
                        Qindex, Qresult = self._love_you_forever(index + 1, 1, indent + 1)
                        return Qindex + 1, [self._mesilf(Qdefinition, True), Qresult]
            return index - 1, None
        # 7: config parsing
        elif status == 7:
            Axx = RE_SECT.fullmatch(line)
            if Axx:
                Qcommand, Qtext = Axx.groups()
                Qindex, Qresult = self._love_you_forever(index + 1, 2, indent + 1)
                result.append([Qcommand, self._combin(Qtext, Qresult, True)])
                jump = Qindex - index
            continue
        # 10: Dialog
        elif status == 10:
            # 0: your own messages
            # 1: the other party's messages
            # 2: the other party's messages, with a name given
            # 3: system prompts
            # Note: when actually rendered, 2 and 1 are merged.
            Axx = RE_DIALOG.fullmatch(line)
            if Axx:
                Qme, Qyou, Qname, Qsys, Qmessage = Axx.groups()
                if Qme:
                    Qtype = 0
                    Qfeat = Qme[1]
                elif Qyou:
                    Qname = Qname and Qname.strip().replace('@@', '@')
                    Qtype = bool(Qname) + 1
                    Qfeat = Qyou[0]
                elif Qsys:
                    Qtype = 3
                    Qfeat = None
                else:
                    continue
                Qargs = {}
                if Qtype < 3 and Qfeat != '-':
                    # $!~?
                    Qargs['typ'] = ('hongbao', 'failed', 'voice', 'sending')[ord(Qfeat) & 3]
                    if Qfeat in {'~', '$'}:
                        try:
                            Qvalue = int(Qmessage) if Qfeat == '~' else int(float(Qmessage) * 100)
                        except (TypeError, ValueError):
                            continue
                        else:
                            # validate voice duration / hongbao amount (checks the feature
                            # character; the flattened source compared the integer Qtype here)
                            if (Qfeat == '~' and not 2 <= Qvalue <= 60) \
                                    or (Qfeat == '$' and not 0 <= Qvalue):
                                continue
                            Qargs['val'] = Qvalue
                Qindex, Qresult = self._love_you_forever(index + 1, 1, indent + 1)
                if Qfeat not in {'~', '$'}:
                    Qresult = self._combin(Qmessage, Qresult)
                    if not Qresult:
                        continue
                result.append([Qtype, Qresult, Qargs] + ([Qname] if Qname else []))
                jump = Qindex - index
            continue
        # 2: not parsed
        if status == 2:
            result.extend(repeat('', more))
            result.append(line)
            more = 0
        # ?: no special block matched
        else:
            result.append(self._mesilf(line))
    if status == 0:
        return index          # called in *comment* mode: only the new index is needed
    elif indent == 0:
        return result         # top-level call: return the results
    else:
        return index, result  # inner call: return the new index and the results
if RUN_GIS_PREPRO:
    for task in task_list:
        execute_entity_task(task, gdirs)

if RUN_CLIMATE_PREPRO:
    for gdir in gdirs:
        gdir.inversion_calving_rate = 0
    execute_entity_task(tasks.process_cru_data, gdirs)
    execute_entity_task(tasks.local_t_star, gdirs)
    execute_entity_task(tasks.mu_star_calibration, gdirs)

if RUN_INVERSION:
    # Inversion tasks
    execute_entity_task(tasks.prepare_for_inversion, gdirs, add_debug_var=True)
    execute_entity_task(tasks.mass_conservation_inversion, gdirs,
                        filesuffix='_without_calving_')

# Log
m, s = divmod(time.time() - start, 60)
h, m = divmod(m, 60)
log.info("OGGM no_calving is done! Time needed: %02d:%02d:%02d" % (h, m, s))

suf = 'config_7_'

# Compute calving per k factor
for gdir in gdirs:
    forwrite = []
    # Find a calving flux.
    inversion.find_inversion_calving(gdir)
def getElapsedTime(start_time):
    elapsed_time = time.time() - start_time
    # Round before divmod so the seconds component cannot round up to 60
    # (the original rounded afterwards, which could yield e.g. '1m 60s').
    minutes, seconds = divmod(round(elapsed_time), 60)
    return '{}m {}s'.format(int(minutes), int(seconds))
def __str__(self):
    minute, second = divmod(self.seconds, 60)
    hour, minute = divmod(minute, 60)
    return '%.2d:%.2d:%.2d' % (hour, minute, second)
def transform(value, subject):
    value *= subject
    value, r = divmod(value, 20201227)
    return r
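# transform is one step of a repeated modular multiplication (the modulus
# 20201227 suggests the Advent of Code 2020 day 25 handshake); a hedged
# sketch of how it might be iterated, with `loop` as a hypothetical helper:
def loop(subject, loop_size):
    value = 1
    for _ in range(loop_size):
        value = transform(value, subject)
    return value

# repeated transform is just modular exponentiation:
assert loop(7, 8) == pow(7, 8, 20201227)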
def APP(self, marker):
    #
    # Application marker.  Store these in the APP dictionary.
    # Also look for well-known application markers.

    n = i16(self.fp.read(2)) - 2
    s = ImageFile._safe_read(self.fp, n)

    app = "APP%d" % (marker & 15)

    self.app[app] = s  # compatibility
    self.applist.append((app, s))

    if marker == 0xFFE0 and s[:4] == b"JFIF":
        # extract JFIF information
        self.info["jfif"] = version = i16(s, 5)  # version
        self.info["jfif_version"] = divmod(version, 256)
        # extract JFIF properties
        try:
            jfif_unit = i8(s[7])
            jfif_density = i16(s, 8), i16(s, 10)
        except:
            pass
        else:
            if jfif_unit == 1:
                self.info["dpi"] = jfif_density
            self.info["jfif_unit"] = jfif_unit
            self.info["jfif_density"] = jfif_density
    elif marker == 0xFFE1 and s[:5] == b"Exif\0":
        # extract Exif information (incomplete)
        self.info["exif"] = s  # FIXME: value will change
    elif marker == 0xFFE2 and s[:5] == b"FPXR\0":
        # extract FlashPix information (incomplete)
        self.info["flashpix"] = s  # FIXME: value will change
    elif marker == 0xFFE2 and s[:12] == b"ICC_PROFILE\0":
        # Since an ICC profile can be larger than the maximum size of
        # a JPEG marker (64K), we need provisions to split it into
        # multiple markers. The format defined by the ICC specifies
        # one or more APP2 markers containing the following data:
        #   Identifying string      ASCII "ICC_PROFILE\0" (12 bytes)
        #   Marker sequence number  1, 2, etc (1 byte)
        #   Number of markers       Total of APP2's used (1 byte)
        #   Profile data            (remainder of APP2 data)
        # Decoders should use the marker sequence numbers to
        # reassemble the profile, rather than assuming that the APP2
        # markers appear in the correct sequence.
        self.icclist.append(s)
    elif marker == 0xFFEE and s[:5] == b"Adobe":
        self.info["adobe"] = i16(s, 5)
        # extract Adobe custom properties
        try:
            adobe_transform = i8(s[1])
        except:
            pass
        else:
            self.info["adobe_transform"] = adobe_transform
    elif marker == 0xFFE2 and s[:4] == b"MPF\0":
        # extract MPO information
        self.info["mp"] = s[4:]
        # offset is current location minus buffer size
        # plus constant header size
        self.info["mpoffset"] = self.fp.tell() - n + 4
def _find_label_coordinates(self, date):
    first_weekday_of_the_month = (date.weekday() - date.day) % 7
    return divmod(
        (first_weekday_of_the_month - self._cal.firstweekday) % 7 + date.day,
        7)
def test_file_block_generator_marker_limit(self):
    driver = self.create_driver()
    vault_id = self.create_vault_id()
    file_id = self.create_file_id()

    num_blocks = 40
    driver.create_file(vault_id, file_id)

    block_ids = ['block_{0}'.format(id) for id in range(0, num_blocks)]
    # Note: the mongo DB mocking driver is hard-coded to use
    # 40 1024-byte blocks.
    block_sizes = [1024 for _ in range(0, num_blocks)]
    offsets = [sum(block_sizes[:x]) for x in range(0, num_blocks)]
    blocklist = list(zip(block_ids, block_sizes, offsets))
    file_size = sum(block_sizes)

    # register all of the blocks
    for block_id, block_size, offset in blocklist:
        driver.register_block(vault_id, block_id,
                              self._genstorageid(block_id), block_size)

    for block_id, block_size, offset in blocklist:
        driver.assign_block(vault_id, file_id, block_id, offset)

    driver.finalize_file(vault_id, file_id)

    limit = 3
    offset = 0

    outblocks = []
    outoffsets = []

    iterations, remainder = divmod(num_blocks, limit)

    for _ in range(0, iterations):
        page = list(
            driver.create_file_block_generator(vault_id, file_id,
                                               offset=offset, limit=limit))
        self.assertEqual(len(page), limit)

        tempblocks, tempoffsets = zip(*page)
        outblocks.extend(tempblocks)
        outoffsets.extend(tempoffsets)

        offset = outoffsets[-1] + 1 if len(outoffsets) > 0 else None

    if remainder > 0:
        page = list(
            driver.create_file_block_generator(vault_id, file_id,
                                               offset=offset, limit=limit))
        self.assertEqual(len(page), remainder)

        tempblocks, tempoffsets = zip(*page)
        outblocks.extend(tempblocks)
        outoffsets.extend(tempoffsets)

    self.assertEqual(list(outblocks), block_ids)
    self.assertEqual(list(outoffsets), offsets)

    # Now try to do it again, this time with a ridiculous offset value
    out = list(
        driver.create_file_block_generator(vault_id, file_id,
                                           offset=999999, limit=3))
    self.assertEqual(out, [])
n = int(input('enter natural number '))
m = 0
while n > 0:
    (n, d) = divmod(n, 10)
    m = max(m, d)
print('the max digit is ' + str(m))
def convert(num, base):
    q, r = divmod(num, base)
    if q == 0:
        return tmp[r]
    else:
        return convert(q, base) + tmp[r]
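# convert relies on a global digit table `tmp`; a plausible definition and
# usage (the table below is an assumption, not shown in the original snippet):
tmp = '0123456789abcdef'
assert convert(255, 16) == 'ff'
assert convert(10, 2) == '1010'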
def _interval_time(self, start, end):
    seconds = (end - start).seconds
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    return "%d:%02d:%02d" % (hours, minutes, seconds)
        pro.communicate()
        son_pro_lst = []
        continue
    else:
        continue

single_pro_lst = []
for chunk_file in single_lst:
    out_file = os.path.join(tmp_folder, 'modified_' + os.path.basename(chunk_file))
    out_file_lst.append(out_file)
    mod_gtf_cmd = 'python {} -input_gtf {} -out {} -ref_dic {} -combined_gtf_dic_pk {} '.format(
        trans_script, chunk_file, out_file, ref_dic_pickle, combined_gtf_content_dic_pickle)
    extra = subprocess.Popen(mod_gtf_cmd, shell=True)
    single_pro_lst.append(extra)
for extra_pro in single_pro_lst:
    extra_pro.communicate()

out_file_str = " ".join(out_file_lst)
subprocess.call('cat {} > {}'.format(out_file_str, new_file_path), shell=True)
print('Merged the final results into {}'.format(new_file_path))
if rm:
    subprocess.call('rm -rf {}'.format(tmp_folder), shell=True)

time2 = time.time()
duration = time2 - time1
m, s = divmod(duration, 60)
h, m = divmod(m, 60)
print('Total runtime: {}h:{}m:{}s'.format(h, m, s))
def bytes(integer):
    # Splits an integer into (high byte, low byte); note this shadows the built-in `bytes`.
    return divmod(integer, 0x100)
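# Example: 0x1234 -> high byte 0x12, low byte 0x34.
assert bytes(0x1234) == (0x12, 0x34)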
def parse_generalized_time(timestr):
    """Parses a Generalized Time string (as specified in X.680),
    returning a datetime object. Generalized Times are stored inside
    the krbPasswordExpiration attribute in LDAP.

    This method doesn't attempt to be perfect wrt timezones. If python
    can't be bothered to implement them, how can we..."""
    if len(timestr) < 8:
        return None
    try:
        date = timestr[:8]
        time = timestr[8:]

        year = int(date[:4])
        month = int(date[4:6])
        day = int(date[6:8])

        hour = min = sec = msec = 0
        tzone = None

        if (len(time) >= 2) and re.match(r'\d', time[0]):
            hour = int(time[:2])
            time = time[2:]
            if len(time) >= 2 and (time[0] == "," or time[0] == "."):
                hour_fraction = "."
                time = time[1:]
                while (len(time) > 0) and re.match(r'\d', time[0]):
                    hour_fraction += time[0]
                    time = time[1:]
                total_secs = int(float(hour_fraction) * 3600)
                min, sec = divmod(total_secs, 60)

        if (len(time) >= 2) and re.match(r'\d', time[0]):
            min = int(time[:2])
            time = time[2:]
            if len(time) >= 2 and (time[0] == "," or time[0] == "."):
                min_fraction = "."
                time = time[1:]
                while (len(time) > 0) and re.match(r'\d', time[0]):
                    min_fraction += time[0]
                    time = time[1:]
                sec = int(float(min_fraction) * 60)

        if (len(time) >= 2) and re.match(r'\d', time[0]):
            sec = int(time[:2])
            time = time[2:]
            if len(time) >= 2 and (time[0] == "," or time[0] == "."):
                sec_fraction = "."
                time = time[1:]
                while (len(time) > 0) and re.match(r'\d', time[0]):
                    sec_fraction += time[0]
                    time = time[1:]
                msec = int(float(sec_fraction) * 1000000)

        if (len(time) > 0):
            tzone = GeneralizedTimeZone(time)

        return datetime.datetime(year, month, day, hour, min, sec, msec, tzone)
    except ValueError:
        return None
def __new__(cls, hours=0, minutes=0, seconds=0, centiseconds=0):
    h = m = s = cs = 0

    # Get rid of all fractions, and normalize s and us..
    assert isinstance(hours, int)  # was `isinstance(h, int)`, which only checked the zero initializer
    h = hours

    assert isinstance(minutes, int)
    hours, minutes = divmod(minutes, 60)
    h += hours
    m = int(minutes)  # can't overflow
    assert isinstance(m, int)
    assert abs(m) <= 60
    # minutes isn't referenced again before redefinition

    if isinstance(seconds, float):
        secondsfrac, seconds = _math.modf(seconds)
        assert seconds == int(seconds)
        seconds = int(seconds)
        assert abs(secondsfrac) <= 1.0
    else:
        secondsfrac = 0
    assert isinstance(seconds, int)
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    h += hours
    m += minutes
    s = seconds
    assert isinstance(h, int)
    assert isinstance(m, int)
    assert isinstance(seconds, int)
    assert abs(m) <= 2 * 60
    assert abs(s) < 60

    csint = round(secondsfrac * 100)
    assert abs(csint) < 100
    assert isinstance(csint, int)

    assert isinstance(centiseconds, int)
    seconds, centiseconds = divmod(centiseconds + csint, 100)
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    h += hours
    m += minutes
    s += seconds
    cs = centiseconds
    assert isinstance(h, int)
    assert isinstance(m, int)
    assert isinstance(s, int)
    assert isinstance(cs, int)
    assert abs(m) <= 3 * 60
    assert abs(s) < 2 * 60
    assert abs(cs) < 100

    # Just a little bit of carrying possible for microseconds and seconds.
    seconds, cs = divmod(cs, 100)
    s += seconds
    minutes, s = divmod(s, 60)
    m += minutes
    hours, m = divmod(m, 60)
    h += hours

    assert isinstance(h, int)
    assert isinstance(m, int) and 0 <= m < 60
    assert isinstance(s, int) and 0 <= s < 60
    assert isinstance(cs, int) and 0 <= cs < 100

    if abs(h) > 999:
        raise OverflowError("mytimedelta # of hours is too large: %d" % h)

    self = object.__new__(cls)
    self._hours = h
    self._minutes = m
    self._seconds = s
    self._centiseconds = cs
    self._hashcode = -1
    return self
def str_coord(c):
    if c is None:
        return 'pass'
    row, col = divmod(c - (W+1), W)
    return '%c%d' % (colstr[col], N - row)
vars(str)
logger.info("1+1")
logger.info(eval("1+1"))
# user_value = eval(input("Enter any number:"))
exec("import time; logger.info(time.ctime())")
exec(compile("a=1;b=20;logger.info(a+b)", "<string>", "exec"))

# ============ abs ============== #
logger.info(abs(1))   # 1
logger.info(abs(-1))  # 1

# =========== divmod ============= #
logger.info(divmod(4, 2))  # (4//2, 4%2) -> (2, 0)
logger.info(divmod(3, 2))  # (3//2, 3%2) -> (1, 1)
logger.info(3 // 2)
logger.info(3 % 2)

# ============= max =============== #
# help(max)
logger.info(max(1, 2, 3))   # 3
logger.info(max("python"))  # y
logger.info(max("aython", "baresh", key=lambda value: value[1]))
logger.info(max("", default="No value"))
logger.info(max("", default="empty value"))

# ============== min ============== #
def run_geocode(inps):
    """geocode all input files"""
    start_time = time.time()

    # Prepare geometry for geocoding
    res_obj = resample(lookupFile=inps.lookupFile,
                       dataFile=inps.file[0],
                       SNWE=inps.SNWE,
                       laloStep=inps.laloStep,
                       processor=inps.processor)
    res_obj.open()

    # resample input files one by one
    for infile in inps.file:
        print('-' * 50 + '\nresampling file: {}'.format(infile))
        ext = os.path.splitext(infile)[1]
        atr = readfile.read_attribute(infile, datasetName=inps.dset)
        outfile = auto_output_filename(infile, inps)
        if inps.updateMode and ut.run_or_skip(outfile, in_file=[infile, inps.lookupFile]) == 'skip':
            print('update mode is ON, skip geocoding.')
            continue

        # read source data and resample
        dsNames = readfile.get_dataset_list(infile, datasetName=inps.dset)
        maxDigit = max([len(i) for i in dsNames])
        dsResDict = dict()
        for dsName in dsNames:
            print('reading {d:<{w}} from {f} ...'.format(d=dsName, w=maxDigit, f=os.path.basename(infile)))
            if ext in ['.h5', '.he5']:
                data = readfile.read(infile, datasetName=dsName, print_msg=False)[0]
            else:
                data, atr = readfile.read(infile, datasetName=dsName, print_msg=False)

            # keep timeseries data as 3D matrix when there is only one acquisition
            # because readfile.read() will squeeze it to 2D
            if atr['FILE_TYPE'] == 'timeseries' and len(data.shape) == 2:
                data = np.reshape(data, (1, data.shape[0], data.shape[1]))

            res_data = res_obj.run_resample(src_data=data,
                                            interp_method=inps.interpMethod,
                                            fill_value=inps.fillValue,
                                            nprocs=inps.nprocs,
                                            print_msg=True)
            dsResDict[dsName] = res_data

        # update metadata
        if inps.radar2geo:
            atr = metadata_radar2geo(atr, res_obj)
        else:
            atr = metadata_geo2radar(atr, res_obj)
        #if len(dsNames) == 1 and dsName not in ['timeseries']:
        #    atr['FILE_TYPE'] = dsNames[0]
        #    infile = None

        writefile.write(dsResDict, out_file=outfile, metadata=atr, ref_file=infile)

    m, s = divmod(time.time() - start_time, 60)
    print('time used: {:02.0f} mins {:02.1f} secs.\n'.format(m, s))
    return outfile
def model_train(test=False):
    """
    example function to train model

    The 'test' flag when set to 'True':
        (1) subsets the data and serializes a test version
        (2) specifies the use of the 'test' log file

    The iris dataset is already small so the subset is shown as an example.

    Note that the latest training data is always saved to be used by
    performance monitoring tools.
    """
    ## start timer for runtime
    time_start = time.time()

    ## data ingestion
    X, y = load_aavail_data()
    preprocessor = get_preprocessor()

    ## subset the data to enable faster unittests
    if test:
        n_samples = int(np.round(0.9 * X.shape[0]))
        subset_indices = np.random.choice(np.arange(X.shape[0]), n_samples, replace=False).astype(int)
        mask = np.in1d(np.arange(y.size), subset_indices)
        y = y[mask]
        X = X[mask]

    ## Perform a train-test split
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)

    ## Specify parameters and model
    param_grid = {
        'clf__n_estimators': [25, 50, 75, 100],
        'clf__criterion': ['gini', 'entropy'],
        'clf__max_depth': [2, 4, 6]
    }

    print("... grid searching")
    clf = ensemble.RandomForestClassifier()
    pipe = Pipeline(steps=[('pre', preprocessor), ('clf', clf)])
    grid = GridSearchCV(pipe, param_grid=param_grid, cv=5, iid=False, n_jobs=-1)
    grid.fit(X_train, y_train)
    params = grid.best_params_
    params = {re.sub("clf__", "", key): value for key, value in params.items()}

    ## fit model on training data
    clf = ensemble.RandomForestClassifier(**params)
    pipe = Pipeline(steps=[('pre', preprocessor), ('clf', clf)])
    pipe.fit(X_train, y_train)
    y_pred = pipe.predict(X_test)
    eval_test = classification_report(y_test, y_pred, output_dict=True)

    ## retrain using all data
    pipe.fit(X, y)
    if test:
        print("... saving test version of model")
        joblib.dump(pipe, os.path.join("models", "test.joblib"))
    else:
        print("... saving model: {}".format(SAVED_MODEL))
        joblib.dump(pipe, SAVED_MODEL)

    print("... saving latest data")
    data_file = os.path.join("models", 'latest-train.pickle')
    with open(data_file, 'wb') as tmp:
        pickle.dump({'y': y, 'X': X}, tmp)

    m, s = divmod(time.time() - time_start, 60)
    h, m = divmod(m, 60)
    runtime = "%03d:%02d:%02d" % (h, m, s)

    ## update the log file
    update_train_log(X.shape, eval_test, runtime,
                     MODEL_VERSION, MODEL_VERSION_NOTE, test=test)
            rospy.logwarn("# reward that action gave=>" + str(reward))
            rospy.logwarn("# episode cumulated_reward=>" + str(cumulated_reward))
            rospy.logwarn("# State in which we will start next step=>" + str(nextState))
            qlearn.learn(state, action, reward, nextState)

            if not (done):
                rospy.logwarn("NOT DONE")
                state = nextState
            else:
                rospy.logwarn("DONE")
                last_time_steps = numpy.append(last_time_steps, [int(i + 1)])
                break
            rospy.logwarn("############### END Step=>" + str(i))
            #raw_input("Next Step...PRESS KEY")
            # rospy.sleep(2.0)

        m, s = divmod(int(time.time() - start_time), 60)
        h, m = divmod(m, 60)
        rospy.logerr(("EP: " + str(x + 1) + " - [alpha: " + str(round(qlearn.alpha, 2)) +
                      " - gamma: " + str(round(qlearn.gamma, 2)) +
                      " - epsilon: " + str(round(qlearn.epsilon, 2)) + "] - Reward: " +
                      str(cumulated_reward) + " Time: %d:%02d:%02d" % (h, m, s)))

    rospy.loginfo(("\n|" + str(nepisodes) + "|" + str(qlearn.alpha) + "|" + str(qlearn.gamma) + "|" +
                   str(initial_epsilon) + "*" + str(epsilon_discount) + "|" + str(highest_reward) + "| PICTURE |"))

    l = last_time_steps.tolist()
    l.sort()

    # print("Parameters: a="+str)
    rospy.loginfo("Overall score: {:0.2f}".format(last_time_steps.mean()))
    rospy.loginfo("Best 100 score: {:0.2f}".format(reduce(lambda x, y: x + y, l[-100:]) / len(l[-100:])))
def test(modelPath):
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # net = torch.load("./checkpoints_unet/unet_1.pt")
    # net = torch.load("./checkpoints_attention/aspp_4.pt")
    # net = torch.load("./checkpoints_attention/SpoonNetSpretral3_12.pt", map_location=torch.device('cpu'))
    # total_params = sum(p.numel() for p in net.parameters())
    # print(total_params)
    # # net = UNet(n_channels=10, n_classes=2)
    # # print(net.state_dict().keys())
    # net = net.to(device)
    # net = net.float()
    # net.eval()

    # net2 = torch.load("./checkpoints_unet/unet_1.pt")
    # total_params = sum(p.numel() for p in net2.parameters())
    # print(total_params)
    # net2 = net2.to(device)
    # net2 = net2.float()
    # net2.eval()

    net3 = torch.load("./checkpoints_attention/SegNet_2.pt")
    total_params = sum(p.numel() for p in net3.parameters())
    print(total_params)
    net3 = net3.to(device)
    net3 = net3.float()
    net3.eval()

    all_train_iter_loss = []
    all_test_iter_loss = []

    # start timing
    prev_time = datetime.now()
    senceDict = read_list()
    predEvalArray = np.zeros((8, 5))
    unetEvalArray = np.zeros((8, 5))
    segEvalArray = np.zeros((8, 5))
    qaEvalArray = np.zeros((8, 5))
    roc = np.zeros((2, 100))

    for epo in range(1):
        train_loss = 0
        acc = 0.
        evaluateArray = np.zeros((4))
        qaArray = np.zeros((4))

        # net.train()
        for index, (names, bag, bag_msk, qa) in enumerate(test_dataloader):
            # bag.shape is torch.Size([4, 10, 512, 512])
            # bag_msk.shape is torch.Size([4, 1, 512, 512])
            bag = bag.to(device)
            bag_msk = bag_msk.to(device)
            # qa = qa.to(device)

            # [output, spectral, _] = net(bag)
            # outputData = np.argmax(output.data, 1)
            # output2 = net2(bag)
            # outputData2 = np.argmax(output2.data, 1)
            output3 = net3(bag)
            outputData3 = np.argmax(output3.data, 1)

            regionSelect(bag_msk.data)
            # regionSelect(outputData)
            # regionSelect(outputData2)
            regionSelect(outputData3)
            # regionSelect(qa.data)

            if index % 10 == 0:
                print(index)

            for idx, name in enumerate(names):
                senceId = re.split('[_]', name)[0]
                y = bag_msk.data[idx]
                # y_ = outputData[idx]
                # tmpList = evaluate(y, y_)
                # predEvalArray[senceDict[senceId]] += np.array(tmpList)
                # y_ = outputData2[idx]
                # tmpList = evaluate(y, y_)
                # unetEvalArray[senceDict[senceId]] += np.array(tmpList)
                y_ = outputData3[idx]
                tmpList = evaluate(y, y_)
                segEvalArray[senceDict[senceId]] += np.array(tmpList)
                # qa_ = qa.data[idx]
                # tmpList = evaluate(y, qa_)
                # qaEvalArray[senceDict[senceId]] += np.array(tmpList)

    cur_time = datetime.now()
    h, remainder = divmod((cur_time - prev_time).seconds, 3600)
    m, s = divmod(remainder, 60)
    time_str = "Time %02d:%02d:%02d" % (h, m, s)
    print('time: %s' % (time_str))

    # print(predEvalArray)
    # np.save('./log/spoonNetEvalArray_region.npy', predEvalArray)
    # np.save('./log/unetEvalArray_region.npy', unetEvalArray)
    np.save('./log/segEvalArray_region.npy', segEvalArray)
    # np.save('./log/qaEvalArray_region.npy', qaEvalArray)
    # showEvaluate(predEvalArray)
    # showEvaluate(unetEvalArray)
    # showEvaluate(segEvalArray)
    # npy2tex(unetEvalArray)
    npy2tex(segEvalArray)