def search():
    """Flask view: look up the posted query, log the elapsed time, render the result."""
    started = time_clock()
    query = request.form['text']
    app.logger.info(query)
    matches = process(query)
    elapsed = time_clock() - started
    app.logger.info(elapsed)
    if matches:
        return render_template('index.html', result=', '.join(matches))
    return render_template('index.html', result='Новотворів не знайдено.')
def curl_progress(disable_progressbar, dl_state, dl_total, dl_now, ul_total, ul_now):
    """Callback assigned to the pycurl download, showing progress (if not disabled).

    Args:
        disable_progressbar: when truthy, do nothing.
        dl_state: mutable state object carrying dl_prev, prev_time, start_time.
        dl_total / dl_now: total and currently downloaded bytes (from libcurl).
        ul_total / ul_now: upload counters (unused here).

    Returns:
        0 always — a non-zero return would make libcurl abort the transfer.
    """
    if disable_progressbar:
        return 0
    # If the downloaded size did not change, we don't update.
    if dl_now == dl_state.dl_prev:
        return 0
    cur_time = time_clock()
    # Unless this is the final update (download completed), only print one
    # progress line every 0.3 seconds.
    if dl_total != dl_now and (cur_time - dl_state.prev_time) < 0.3:
        return 0
    # Just in case the clock does not have enough resolution.
    if cur_time == dl_state.prev_time:
        return 0
    avg_speed = dl_now / (cur_time - dl_state.start_time) / 1000.0
    cur_speed = (dl_now - dl_state.dl_prev) / (cur_time - dl_state.prev_time) / 1000.0
    # Now we update dl_state.
    dl_state.prev_time = cur_time
    dl_state.dl_prev = dl_now
    percent = 0.0
    if dl_total != 0.0:
        percent = float(dl_now) / float(dl_total) * 100.0
    elif dl_now > 0.0:
        # Sometimes the file is so small that it is downloaded completely
        # before libcurl knows dl_total.
        percent = 100.0
    # BUGFIX: use floor division for the bar width. With true division,
    # int(percent) / 10 is a float on Python 3 and "#" * float raises
    # TypeError; // yields the same int on both Python 2 and 3.
    texto = u"\r[%s] %.2f%% downloaded %d/%d avg:%dKb/s cur:%dKb/s" % \
        ("#" * (int(percent) // 10), percent, dl_now, dl_total, avg_speed, cur_speed)
    sys.stdout.write(texto)
    sys.stdout.flush()
    return 0
def wrapper(*args, **kwargs):
    """Debounced call of the wrapped function.

    Calls the closure's `func` immediately if at least `secs` seconds have
    elapsed since the last call recorded in `_delays`; otherwise schedules a
    deferred call via wx.CallAfter/wx.CallLater.
    NOTE(review): relies on closure variables `func`, `secs`, `_delays` and
    the `wx` module — all defined outside this view of the file.
    """
    # arguments to the original function
    now = time_clock()
    # Key on the function plus its bound instance (old-style `im_self`),
    # so each bound method gets its own debounce slot.
    key = (func, getattr(func, 'im_self', None))
    lastcalled, caller = _delays[key]
    diff = now - lastcalled
    if diff > secs:
        # CALL NOW: enough time has passed; cancel any pending timer first.
        if isinstance(caller, wx.CallLater):
            caller.Stop()
        _delays[key] = (now, None)
        return func(*args, **kwargs)
    else:
        # CALL LATER
        if caller == 'pending':
            # the wx.CallAfter hasn't completed yet — nothing to do.
            pass
        elif not caller:
            # No timer scheduled yet: fire when the debounce window ends.
            callin_ms = (lastcalled + secs - now) * 1000
            def later():
                def muchlater():
                    # Record the real call time, then invoke the function.
                    _delays[key] = (time_clock(), None)
                    func(*args, **kwargs)
                _delays[key] = (lastcalled, wx.CallLater(max(1, callin_ms), muchlater))
            # Mark the slot 'pending' until `later` runs on the wx event loop.
            _delays[key] = (lastcalled, 'pending')
            wx.CallAfter(later)
    # NOTE(review): deferred path returns the function object itself, not a
    # call result — presumably intentional; confirm against callers.
    return func
def download_resource(url, filename, rate_limit, cookies_filename, my_logger, dl_state, disable_progressbar):
    """Download `url` into `filename` using pycurl (Python 2 code: `print` statements).

    rate_limit: optional bytes/sec cap passed to libcurl, or None for unlimited.
    cookies_filename: file used both to read and persist cookies.
    dl_state: progress-state object shared with curl_progress; its
        start_time/prev_time are initialised here.
    """
    my_logger.debug("Downloading '%s' from '%s' ...\n" % (filename, url))
    print "Downloading '%s'" % dl_state.internal_path
    curl = pycurl.Curl()
    curl.setopt(curl.URL, url)
    if rate_limit is not None:
        curl.setopt(curl.MAX_RECV_SPEED_LARGE, rate_limit)
    file_store = open(filename, "wb")
    curl.setopt(curl.WRITEDATA, file_store)
    # We always activate progress handling in libcurl to permit the usage of
    # ctrl+c to abort; our function curl_progress checks the disabled flag
    # itself before printing anything.
    curl.setopt(curl.NOPROGRESS, 0)
    curl.setopt(curl.PROGRESSFUNCTION, functools.partial(curl_progress, disable_progressbar, dl_state))
    curl.setopt(curl.FOLLOWLOCATION, 1)  # needed for videos.
    # Needed on Windows because libcurl doesn't use the browsers' certificates.
    if sys.platform.startswith("win"):
        curl.setopt(curl.SSL_VERIFYPEER, 0)
    # cookies
    curl.setopt(curl.COOKIEJAR, cookies_filename)
    curl.setopt(curl.COOKIEFILE, cookies_filename)
    # We set start time and prev_time just before the transfer begins.
    dl_state.start_time = dl_state.prev_time = time_clock()
    try:
        curl.perform()
    except:
        # Best-effort: log the full traceback and fall through to cleanup.
        import traceback
        my_logger.error(u"Error downloading file: %s" % traceback.format_exc())
    # cleaning
    curl.close()
    file_store.close()
    print "\n"  # change line for progress messages
    return
# python3 factorize_main.py factorize2
if __name__ == '__main__':
    # First parse command line arguments and figure out which
    # function we want to test.
    if len(sys.argv) <= 1:
        fun = factorize4
    else:
        fun_to_call_string = sys.argv[1]
        assert fun_to_call_string in globals(), ('You did not implement ' + fun_to_call_string)
        globals_copy = globals().copy()
        fun = globals_copy.get(fun_to_call_string)
    # FIX: context manager guarantees the file is closed even when a
    # factorization fails and we sys.exit(1) below.
    with open('composite_list.txt', 'r') as f:
        # Test each number in the list, timing each factorization.
        for line in f:
            n = int(line)
            # FIX: strip the trailing newline so the digit count is accurate.
            print('Factoring', n, '(', len(line.strip()), 'digits ): ', end='')
            t1 = time_clock()  # Record time
            p = fun(n)
            t2 = time_clock()  # Record time
            time_elapsed = t2 - t1  # seconds
            # FIX: n // p — the cofactor is an integer; true division would
            # print it as a (possibly imprecise) float on Python 3.
            print('Factor = ', p, ' other factor = ', n // p, ' Time Elapsed: ', time_elapsed)
            if n % p != 0:
                print('Factorization failed for: ', n)
                sys.exit(1)
def main():
    """CLI entry point for rma: parse arguments, validate them, run RmaApplication.

    Raises:
        Exception: when --behaviour or --type values are not in the allowed sets.
    """
    description = """RMA is used to scan Redis key space in and aggregate memory usage statistic by key patterns."""
    parser = ArgumentParser(prog='rma', description=description, formatter_class=parser_formatter)
    parser.add_argument("-s", "--server", dest="host", default="127.0.0.1",
                        help="Redis Server hostname. Defaults to 127.0.0.1")
    parser.add_argument("-p", "--port", dest="port", default=6379, type=int,
                        help="Redis Server port. Defaults to 6379")
    parser.add_argument("-a", "--password", dest="password",
                        help="Password to use when connecting to the server")
    parser.add_argument("-d", "--db", dest="db", default=0,
                        help="Database number, defaults to 0")
    parser.add_argument("-m", "--match", dest="match", default="*",
                        help="Keys pattern to match")
    parser.add_argument("-l", "--limit", dest="limit", default="0", type=int,
                        help="Get max key matched by pattern")
    parser.add_argument(
        "-b", "--behaviour", dest="behaviour", default="all",
        help="Specify application working mode. Allowed values are " + ', '.join(VALID_MODES))
    parser.add_argument(
        "-t", "--type", dest="types", action="append",
        help="""Data types to include. Possible values are string, hash, list, set. Multiple types can be provided. If not specified, all data types will be returned. Allowed values are """ + ', '.join(VALID_TYPES))
    parser.add_argument("-f", "--format", dest="format", default="text",
                        help="Output type format: json or text (by default)")
    parser.add_argument("-x", "--separator", dest="separator", default=":",
                        help="Specify namespace separator. Default is ':'")
    options = parser.parse_args()

    filters = {}
    if options.behaviour:
        if options.behaviour not in VALID_MODES:
            # BUGFIX: the message previously listed VALID_TYPES — the wrong
            # set — when reporting an invalid behaviour.
            raise Exception(
                'Invalid behaviour provided - %s. Expected one of %s' % (
                    options.behaviour, (", ".join(VALID_MODES))))
        else:
            filters['behaviour'] = options.behaviour

    if options.types:
        filters['types'] = []
        for x in options.types:
            if x not in VALID_TYPES:
                raise Exception(
                    'Invalid type provided - %s. Expected one of %s' % (
                        x, (", ".join(VALID_TYPES))))
            else:
                filters['types'].append(x)

    app = RmaApplication(host=options.host, port=options.port, db=options.db,
                         password=options.password, match=options.match,
                         limit=options.limit, filters=filters,
                         format=options.format, separator=options.separator)
    start_time = time_clock()
    app.run()
    sys.stderr.write("\r\nDone in %s seconds" % (time_clock() - start_time))
def get_trace(self):
    """Conduct the data acquisation.

    *** BUFFER ALLOCATION:
    AlazarTech digitizers use direct memory access (DMA) to transfer data from
    digitizers to the computer's main memory. The class 'DMABuffer' abstracts a
    memory buffer on the host, and ensures that all the requirements for DMA
    transfers are met.

    *** ARGUMENTS:
    c_sample_type (ctypes type): The datatype of the buffer to create.
    size_bytes (int): The size of the buffer to allocate, in bytes.

    The current acquisation mode uses AutoDMA. This allows a board to capture
    sample data to on-board dual-port memory while - at the same time -
    transferring sample data from the on-board memory to a buffer in host
    memory. Data acquisition and data transfer are done in parallel, so trigger
    events that occur while the board is transferring data will not be missed.
    If an application is unable to supply buffers as fast a board fills them,
    the board will run out of buffers into which it can transfer sample data.
    The board can continue to acquire data until it fills is on-board memory,
    but then it will abort the acquisition and report a buffer overflow error.

    The minimum number of buffers to be allocated for dual-port acquisition is
    two. However, due to the reasons stated above, it is recommended that an
    application supply three or more buffers to a board. This allows some
    tolerance for operating system latencies.

    Returns:
        (i, q, None, None) — demodulated I and Q arrays (numpy) for the
        averaged trace.
    """
    ### No pre-trigger samples in NPT mode available
    preTriggerSamples = 0
    postTriggerSamples = int(
        self._local['N_sweep_points'])  ### (Multpile of 64!)
    ### Select the number of records per acquisition
    #recordsPerAcquisition = 100000 ###TODO
    ### Select the number of records per DMA buffer.
    recordsPerBuffer = int(self._local['N_records_per_buffer'])
    ### Select the number of buffers per acquisition.
    buffersPerAcquisition = int(self._local['N_buffers_per_acquisition'])
    ### Select the active channels.
    k = 'F_use_channel'
    channels = menus.ATS9870[k][self._local[k]]
    # Count how many of the board's channels are enabled in the bitmask.
    channelCount = 0
    for c in ats.channels:
        channelCount += (c & channels == c)
    ### Compute the number of bytes per record and per buffer
    ### Returns on-board memory size in samples per channel and number of bits per sample
    memorySize_samples, bitsPerSample = self.Resource.getChannelInfo()
    ### Turns bits per sample into bytes per sample
    bytesPerSample = (bitsPerSample.value + 7) // 8
    ### Number of samples per record
    samplesPerRecord = preTriggerSamples + postTriggerSamples
    ### Number of bytes per record
    bytesPerRecord = bytesPerSample * samplesPerRecord
    ### Number of bytes per buffer
    bytesPerBuffer = bytesPerRecord * recordsPerBuffer * channelCount
    ### Select number of DMA buffers to allocate
    bufferCount = 4  ### (*)
    # One accumulator list of per-buffer means for each active channel.
    bufferList = [[] for _ in range(channelCount)]
    ### Allocate DMA buffers
    # NOTE(review): threshold looks like it should be > 1 byte per sample for
    # uint16, not > 8 — but preserved as written; confirm against the SDK.
    sample_type = ctypes.c_uint8
    if bytesPerSample > 8:
        sample_type = ctypes.c_uint16
    buffers = []
    for _ in range(bufferCount):
        buffers.append(ats.DMABuffer(sample_type, bytesPerBuffer))
    ### Set the record size
    self.Resource.setRecordSize(preTriggerSamples, postTriggerSamples)
    recordsPerAcquisition = recordsPerBuffer * buffersPerAcquisition
    ### Configure the board to make an NPT AutoDMA acquisition
    self.Resource.beforeAsyncRead(
        channels, -preTriggerSamples, samplesPerRecord, recordsPerBuffer,
        recordsPerAcquisition, ats.ADMA_EXTERNAL_STARTCAPTURE | ats.ADMA_NPT)
    ### Post DMA buffers to board
    for buff in buffers:
        self.Resource.postAsyncBuffer(buff.addr, buff.size_bytes)
    ### Keep track of when acquisition started
    start = time_clock()
    try:
        self.Resource.startCapture()  # Start the acquisition
        print("Capturing %d buffers. Press <enter> to abort" %
              buffersPerAcquisition)
        buffersCompleted = 0
        bytesTransferred = 0
        while (buffersCompleted < buffersPerAcquisition and
               not ats.enter_pressed()):
            ### Wait for the buffer at the head of the list of available
            ### buffers to be filled by the board.
            buff = buffers[buffersCompleted % len(buffers)]
            self.Resource.waitAsyncBufferComplete(buff.addr, timeout_ms=5000)
            buffersCompleted += 1
            bytesTransferred += buff.size_bytes
            """Process sample data in the buffer here. Data is available as a
            NumPy array at buff.buffer

            While you are processing this buffer, the board is already filling
            the next available buffer. You MUST finish processing this buffer
            and post it back to the board before the board fills all of its
            available DMA buffers and on-board memory.
            """
            ### Split Channel Readout
            channelSplit = np.split(buff.buffer, channelCount)
            ### Calculate each channel individually
            for chan in range(channelCount):
                ### Calculate mean values over the records in this buffer
                bufferSplit = np.split(channelSplit[chan], recordsPerBuffer)
                bufferMean = np.mean(bufferSplit, 0)
                ### Append result to bufferList
                bufferList[chan].append(bufferMean)
            ### Add the buffer to the end of the list of available buffers.
            self.Resource.postAsyncBuffer(buff.addr, buff.size_bytes)
    finally:
        # Always stop the async read, even on abort/timeout/exception.
        self.Resource.abortAsyncRead()
    ### OPTIONAL: Display of total transfer time and performance information.
    transferTime_sec = time_clock() - start
    print("Capture completed in %f sec" % transferTime_sec)
    buffersPerSec = 0
    bytesPerSec = 0
    recordsPerSec = 0
    if transferTime_sec > 0:
        buffersPerSec = buffersCompleted / transferTime_sec
        bytesPerSec = bytesTransferred / transferTime_sec
        recordsPerSec = recordsPerBuffer * buffersCompleted / transferTime_sec
    print("Captured %d buffers (%f buffers per sec)" %
          (buffersCompleted, buffersPerSec))
    print("Captured %d records (%f records per sec)" %
          (recordsPerBuffer * buffersCompleted, recordsPerSec))
    print("Transferred %d bytes (%f Gbytes per sec)" %
          (bytesTransferred, bytesPerSec / 1e9))
    ### Calculate mean value of buffer data (average over all buffers per channel)
    bufferListMean = np.mean(bufferList, 1)
    #### Conversion from 8-bit to volts & split into channels for the return
    codeZero = (1 << (bitsPerSample.value - 1)) - 0.5
    codeRange = (1 << (bitsPerSample.value - 1)) - 0.5
    channelRange = [
        float(self._local[k]) for k in ['F_channelA_range', 'F_channelB_range']
    ]
    #channelCoupling = [self._local[k] for k in ['F_channelA_coupling', 'F_channelB_coupling']]
    y_axis = [[], []]
    for chan in range(channelCount):
        y_axis[chan] = channelRange[chan] * (bufferListMean[chan] - codeZero) / codeRange
    ### make sure y_axis[0] and y_axis[1] have the same format when returned.
    if (channelCount != 2):
        y_axis[1] = y_axis[0]
    ### IQ transformation
    ## Definitions
    i = []
    q = []
    ## Constants: angular IF frequency in rad/ns (MHz -> Hz -> per-ns sample)
    omega = 2 * np.pi * int(
        self._local['R_intermediate_frequency']) * 1e6 * 1e-9
    ## Create x-Axis (sample times, spaced by the decimation factor)
    chx = np.array(
        range(0, postTriggerSamples * int(self._local['F_decimation']),
              int(self._local['F_decimation'])))
    # Per-sample rotation of (chA, chB) into the I/Q frame.
    # NOTE(review): np.append in a loop is O(n^2); preserved as written.
    for el in range(postTriggerSamples):
        t = chx[el]
        i = np.append(
            i,
            np.dot(
                [np.cos(omega * t), np.sin(omega * t)],
                [y_axis[0][el], y_axis[1][el]]))
        q = np.append(
            q,
            np.dot(
                [-np.sin(omega * t), np.cos(omega * t)],
                [y_axis[0][el], y_axis[1][el]]))
    filter_freq = float(self._local['R_filter_frequency'])
    if filter_freq:
        # Optional low-pass: moving average over a window of ~1/f (in ns? —
        # TODO confirm units against auxiliary_functions.running_mean).
        import interface.auxiliary_functions as auxi
        mov_av_time = int(1 / (filter_freq * 10**-3))
        #ss_dict[index]['axis'] = auxi.custom_axis(start+mov_av_time/2, stop-mov_av_time/2, sweep_points-mov_av_time, axis_mode)
        i = auxi.running_mean(i, mov_av_time)
        q = auxi.running_mean(q, mov_av_time)
    print 'I and Q lengths'
    print len(i)
    print len(q)
    return i, q, None, None
def muchlater():
    # Deferred debounce callback: record the actual call time in the shared
    # _delays table, then invoke the wrapped function.
    # NOTE(review): relies on closure variables _delays, key, func, args,
    # kwargs defined in the enclosing wrapper.
    _delays[key] = (time_clock(), None)
    func(*args, **kwargs)