def login(str1, str2):
    """Validation of login interface.

    Looks up the stored record for username `str1` and, when both the
    username and the password `str2` match, opens the main window;
    otherwise shows an error dialog.

    NOTE(review): the password is compared in plain text -- presumably the
    DB stores it unhashed; worth confirming/fixing upstream.
    """
    data = db.get_data(str1)
    # Guard against a missing user row (db.get_data returning None/empty)
    # instead of raising; assumes record layout (?, username, password, ...)
    # -- TODO confirm schema against db.get_data.
    if data and data[1] == str1 and data[2] == str2:
        main.main()
    else:
        # fixed typo in the dialog title: "faild" -> "failed"
        messagebox.showinfo("Login failed", "wrong username/password")
def launch_testrig(self): self.stop_proc() # Import and run the startup script, further execution of this script is halted until the run_loop is stopped. import base base.main() # Reset mode & restart P-ROC / pyprocgame self.mode_started() self.restart_proc()
def load():
    """Import each plugin module and register it with the shared
    dispatcher `dp`.

    `dp` is a module-level global (defined elsewhere in this file) --
    presumably a bot dispatcher; each plugin's main(dp) hooks its handlers
    into it.  Import order is preserved from the original hand-written
    sequence, since module import side effects may matter.
    """
    import importlib

    # DRY: one loop instead of five copy-pasted import/main pairs.
    for name in ('base', 'text_parse', 'utils', 'vortaro', 'xkcd'):
        importlib.import_module(name).main(dp)
def start(apis):
    """Main trading loop: poll every configured API, update its open
    orders, and restart after 40 rounds.

    NOTE(review): errors trigger a recursive start(apis) call, so the call
    stack grows with every failure; the collapsed source makes the exact
    placement of the trailing start(apis) calls ambiguous -- confirm
    against the original file.
    """
    apiss = init(apis)
    counter = 0
    while True:
        try:
            for api in apiss:
                print("\n" + api, apiss[api]["price_increase"],
                      apiss[api]["interval"], apiss[api]["candles"])
                # main() processes one account and returns the updated
                # open-order state used on the next round.
                new_orders = main(apiss[api]["client"], apiss[api]["orders"],
                                  apiss[api]["price_increase"],
                                  apiss[api]["interval"],
                                  apiss[api]["candles"])
                apiss[api]["orders"] = new_orders
            sleep(21)  # pause between polling rounds
            counter += 1
            if counter >= 40:
                print("\nrestarting\n")
                break
        except Exception as e:
            print(f"\n\nmajor error\n\n{e}\n\n")
            sleep(61)  # back off before retrying
            start(apis)
    start(apis)
def main(config_name, *args, **kwargs): print('load config') # create controller c = base.main(*args, **kwargs) # set configuration if not os.path.isdir(config_name): for chip_key, chip in c.chips.items(): print('loading', config_name) chip.config.load(config_name) else: # load all configurations for chips for chip_key, chip in c.chips.items(): config_files = sorted( glob.glob( os.path.join(config_name, config_format.format(chip_key=chip_key)))) if config_files: print('loading', config_files[-1]) chip.config.load(config_files[-1]) # write configuration for chip_key, chip in c.chips.items(): c.write_configuration(chip_key) c.write_configuration(chip_key) # verify for chip_key in c.chips: ok, diff = c.verify_configuration(chip_key) if not ok: print('config error', diff) return c
def main(*args, **kwargs): print('base warm config') # create controller c = base.main(*args, **kwargs) # set configuration for chip_key, chip in c.chips.items(): chip.config.ibias_buffer = 3 chip.config.ibias_tdac = 5 chip.config.ibias_comp = 5 chip.config.ibias_csa = 7 chip.config.ref_current_trim = 15 chip.config.vref_dac = 185 chip.config.vcm_dac = 41 # write configuration registers = [74, 75, 76, 77] # ibias registers += [81] # ref current registers += [82, 83] # vXX_dac c.write_configuration(chip_key, registers) c.write_configuration(chip_key, registers) # verify for chip_key in c.chips: ok, diff = c.verify_configuration(chip_key) if not ok: print('config error', diff) return c
def run():
    """Collect one result from haupt(), label it 'RF ', and compare it
    against base.main()'s results with sk.rdivDemo.

    NOTE: Python 2 code (xrange); drops into the debugger afterwards.
    """
    G = []
    for _ in xrange(1):
        G.append(haupt())
    G.insert(0, 'RF ')  # row label expected by rdivDemo
    # print base.main()+[G]
    sk.rdivDemo(base.main() + [G])
    set_trace()
def _reset_and_reload(c, controller_config):
    """Build a fresh controller, copy each chip's config over from `c`,
    re-write + verify it, and return the new controller.

    NOTE(review): the write/verify calls go through the OLD controller `c`
    rather than `c_new` -- looks suspicious (the fresh controller may never
    receive the configs); left as-is, confirm intent before changing.
    """
    print('start resetting and reloading configs')
    c_new = base.main(controller_config_file=controller_config)
    for chip_key in c.chips:
        print('load config', chip_key)
        c_new[chip_key].config = c[chip_key].config
        c.write_configuration(chip_key)
        ok, diff = c.verify_configuration(chip_key, timeout=0.1)
        if not ok:
            print('config error', diff[chip_key])
    print('done resetting and reloading configs')
    return c_new
def save_pic():
    """Persist the POSTed picture to a timestamped .rgb file and restart
    the animator on it.  Returns HTTP 201 with an empty body.

    Assumes request.json has 'artname' (str) and 'pic' (a sequence of
    32-element columns) -- TODO confirm payload schema against the client.
    """
    global animator
    timestamp = int(time.time())
    artname = request.json['artname']
    filename = '{}/{}_{}.rgb'.format(DIR, timestamp, artname)
    with open(filename, 'w') as f:
        for i in range(32):  # 32 pixelz: one output row per pixel index
            for col in request.json['pic']:
                f.write(col[i])
            f.write('\n')
    if animator:
        animator.stopThread()  # stop the old animation before replacing it
    animator = base.main(filename)
    return '', 201
def main(controller_config=None, channel=0, runtime=12): print('csa bypass') # create controller c = base.main(controller_config_file=controller_config, logger=True) # set configuration print('channel', channel) for chip_key, chip in c.chips.items(): chip.config.external_trigger_mask[channel] = 0 chip.config.channel_mask[channel] = 0 chip.config.enable_hit_veto = 0 chip.config.csa_bypass_enable = 1 chip.config.csa_bypass_select[channel] = 1 registers = list() registers += chip.config.register_map['external_trigger_mask'] registers += chip.config.register_map['channel_mask'] registers += chip.config.register_map['enable_hit_veto'] registers += chip.config.register_map['csa_bypass_enable'] registers += chip.config.register_map['csa_bypass_select'] # write c.write_configuration(chip_key, registers) # verify ok, diff = c.verify_configuration() if not ok: print('config error', diff) # take data print('taking test data...') c.run(0.5, 'test') print(c.reads[-1]) print('received packets:', len(c.reads[-1])) print('taking full data...') print('file: ', c.logger.filename) c.logger.enable() c.run(runtime, 'data') print('received packets:', len(c.reads[-1])) c.logger.flush() c.logger.disable() return c
def main(channel0=0, channel1=1, runtime=12):
    """Run a dual-channel CSA-bypass acquisition on chip '1-1-1'.

    Both channels are unmasked and routed to the CSA bypass; a short
    unlogged sanity run precedes the full logged run.
    Returns the controller.
    """
    print('dual csa bypass')
    # create controller
    c = base.main(logger=True)
    # set configuration on the single chip this script drives
    print('channels', channel0, channel1)
    cfg = c['1-1-1'].config
    cfg.enable_hit_veto = 0
    cfg.csa_bypass_enable = 1
    for ch in (channel0, channel1):
        cfg.external_trigger_mask[ch] = 0
        cfg.channel_mask[ch] = 0
        cfg.csa_bypass_select[ch] = 1
    # write and verify
    c.write_configuration('1-1-1')
    ok, diff = c.verify_configuration('1-1-1')
    if not ok:
        print('config error', diff)
    # short unlogged sanity run
    print('taking test data...')
    c.run(0.5, 'test')
    print(c.reads[-1])
    print('received packets:', len(c.reads[-1]))
    # full logged run
    print('taking full data...')
    print('file: ', c.logger.filename)
    c.logger.enable()
    c.run(runtime, 'data')
    print('received packets:', len(c.reads[-1]))
    c.logger.flush()
    c.logger.disable()
    return c
dt = (p - self.tacc) / self.r.total #print nome, dt line = Line(q, t0, dt, cor) if time.time() < tempo + self.tacc: #print self.t(), tempo, self.tacc self.draw_seg(cr, line, '0', True) else: self.r.acoes.pop(0) #print "%s, %s, %s, %s" % t0, dt, self.lines.append((p - self.ta, line)) self.tacc += tempo for line in self.lines: self.draw_seg(cr, line[1], "%.2f" % line[0], self.r.acoes) cr.pop_group_to_source() cr.paint() plot = Plot() def timeout(): plot.queue_draw() return True gobject.timeout_add(16, timeout) base.main(plot, plot.set_r)
import json

import tornado.web

import apymongo
from apymongo import json_util

import base


class FindOneHandler(tornado.web.RequestHandler):
    """Finds a single record from testdb.testcollection and returns it as
    JSON."""

    @tornado.web.asynchronous
    def get(self):
        # start the async find; handle() runs when the result arrives
        conn = apymongo.Connection()
        coll = conn['testdb']['testcollection']
        coll.find_one(callback=self.handle)

    def handle(self, response):
        # json_util.default serializes BSON-specific types
        self.write(json.dumps(response, default=json_util.default))
        self.finish()


if __name__ == "__main__":
    base.main(FindOneHandler)
# use two sounds like two independent channels # its not as good as HRTF but can fake that # sound source moves if you really want to believe :) lvoice.gain = (sin(t) + 1.0) / 2.0 rvoice.gain = (cos(t) + 1.0) / 2.0 def file_handle(filename, ext): sound = core.Sound(filename, ext) lvoice = core.Voice(sound, loop=True) rvoice = core.Voice(sound, loop=True) lvoice.play() rvoice.play() lvoice.pan = -1.0 rvoice.pan = 1.0 try: while True: pan_voices(lvoice, rvoice, time.time()) core.update() time.sleep(0.001) except KeyboardInterrupt: print("interrupted") if __name__ == "__main__": main(file_handle)
"""Starts a simple application development server.""" # Application import base import socketio from uweb3.sockets import Uweb3SocketIO def websocket_routes(sio): @sio.on("connect") def test(sid, env): print("WEBSOCKET ROUTE CALLED: ", sid, env) def main(): sio = socketio.Server() # websocket_routes(sio) return sio if __name__ == '__main__': sio = main() Uweb3SocketIO(base.main(sio), sio) # # Application # import base # def main(): # app = base.main() # app.serve() # if __name__ == '__main__': # main()
# larpix imports import larpix import larpix.io import base # defining defaults _default_controller_config = None parser = argparse.ArgumentParser() parser.add_argument('--controller_config', default=_default_controller_config, type=str, help='''Hydra network configuration file''') args = parser.parse_args() # Creating controller from base. c = base.main(**vars(args), logger=False) print('\n----------------------------------------') print('Human, You have started the analog monitor.') print('----------------------------------------\n') Condition = True while Condition: print('\n') _chip_key = input("Enter the chip key: ") _channel = int(input("Enter a channel number: ")) print("Configuring the channel") # This allows the monitor to reset. try: c[_chip_key].config.enable_periodic_reset = 1
import apymongo
from apymongo import json_util

import base


class FindOneHandler(tornado.web.RequestHandler):
    """Finds a single record from testdb.testcollection and returns it as
    JSON.  (tornado.web and json are imported earlier in the file.)"""

    @tornado.web.asynchronous
    def get(self):
        # start the async find; handle() runs when the result arrives
        conn = apymongo.Connection()
        coll = conn['testdb']['testcollection']
        coll.find_one(callback=self.handle)

    def handle(self, response):
        # json_util.default serializes BSON-specific types
        self.write(json.dumps(response, default=json_util.default))
        self.finish()


if __name__ == "__main__":
    base.main(FindOneHandler)
import tornado.web

import apymongo
from apymongo import json_util

import base


class CountHandler(tornado.web.RequestHandler):
    """Counts elements of the "testdb.testcollection" database."""

    @tornado.web.asynchronous
    def get(self):
        # start the async count; handle() runs with the result
        conn = apymongo.Connection()
        coll = conn['testdb']['testcollection']
        coll.count(callback=self.handle)

    def handle(self, response):
        # json_util.default serializes BSON-specific types
        self.write(json.dumps(response, default=json_util.default))
        self.finish()


if __name__ == "__main__":
    base.main(CountHandler)
# GNU General Public License for more details. # # # # You should have received a copy of the GNU General Public License # # along with this program. If not, see <http://www.gnu.org/licenses/>. # # # ############################################################################### # relax module imports. from base import cluster, main, NUM_SPINS_CLUSTER, NUM_SPINS_SINGLE, single, Profile from lib.dispersion.variables import EXP_TYPE_CPMG_SQ, MODEL_TSMFK01 # Setup. SINGLE = Profile(exp_type=[EXP_TYPE_CPMG_SQ], num_spins=NUM_SPINS_SINGLE, model=MODEL_TSMFK01, r2a=5.0, dw=3.0, k_AB=10.0, spins_params=['r2a', 'dw', 'k_AB']) CLUSTER = Profile(exp_type=[EXP_TYPE_CPMG_SQ], num_spins=NUM_SPINS_CLUSTER, model=MODEL_TSMFK01, r2a=5.0, dw=3.0, k_AB=10.0, spins_params=['r2a', 'dw', 'k_AB']) # Execute main function. if __name__ == "__main__": main()
""" Streams the results of "find". """ @tornado.web.asynchronous def get(self): self.writing = False self.write('[') conn = apymongo.Connection() coll = conn['testdb']['testcollection'] cursor = coll.find(callback=self.handle, processor=self.stream_processor) cursor.loop() def handle(self, response): self.write(']') self.finish() def stream_processor(self, r, collection): self.write((',' if self.writing else '') + json.dumps(r, default=json_util.default)) self.flush() if not self.writing: self.writing = True if __name__ == "__main__": base.main(StreamHandler)
import base


class FindHandler(tornado.web.RequestHandler):
    """Returns all records in the testdb.testcollection collection.

    Notice the use of the "loop" method, which drives the async cursor
    until the full result set is available.
    """

    @tornado.web.asynchronous
    def get(self):
        conn = apymongo.Connection()
        coll = conn['testdb']['testcollection']
        cursor = coll.find(callback=self.handle)
        cursor.loop()

    def handle(self, response):
        # json_util.default serializes BSON-specific types
        self.write(json.dumps(response, default=json_util.default))
        self.finish()


if __name__ == "__main__":
    base.main(FindHandler)
def main():
    """Build the application via base.main() and serve it."""
    base.main().serve()
import json

import tornado.web

import apymongo
from apymongo import json_util

import base


class CountHandler(tornado.web.RequestHandler):
    """Counts elements of the "testdb.testcollection" database."""

    @tornado.web.asynchronous
    def get(self):
        """Start the asynchronous count; the result arrives in handle()."""
        collection = apymongo.Connection()['testdb']['testcollection']
        collection.count(callback=self.handle)

    def handle(self, response):
        """Serialize the count and complete the request."""
        payload = json.dumps(response, default=json_util.default)
        self.write(payload)
        self.finish()


if __name__ == "__main__":
    base.main(CountHandler)
class StreamHandler(tornado.web.RequestHandler):
    """Streams the results of "find" as an incrementally written JSON
    array."""

    @tornado.web.asynchronous
    def get(self):
        # `writing` records whether at least one element has been emitted,
        # so stream_processor knows when to prepend a comma.
        self.writing = False
        self.write("[")
        conn = apymongo.Connection()
        coll = conn["testdb"]["testcollection"]
        # stream_processor fires per document; handle fires at the end
        cursor = coll.find(callback=self.handle, processor=self.stream_processor)
        cursor.loop()

    def handle(self, response):
        # all documents streamed: close the JSON array and the request
        self.write("]")
        self.finish()

    def stream_processor(self, r, collection):
        self.write(("," if self.writing else "") + json.dumps(r, default=json_util.default))
        self.flush()  # push each element to the client immediately
        if not self.writing:
            self.writing = True


if __name__ == "__main__":
    base.main(StreamHandler)
fmt = "%(asctime)s %(levelname)s %(name)s %(message)s" basicConfig(level=DEBUG, format=fmt) logger = getLogger(__name__) def get_hash(filepath): from os import path import hashlib basename = path.basename(filepath) # logger.debug(str(filepath) + " => " + basename) hash = hashlib.sha256() hash.update(basename.encode('utf-8')) return hash.hexdigest() # hashする必要はないけれども... if __name__ == "__main__": import argparse parser = argparse.ArgumentParser(description='重複ファイル削除プログラム') parser.add_argument('targets', nargs='+', help="target directory, not recurcive") parser.add_argument('--dry-run', action='store_true', dest="dryrun", help='削除を実行せずに、メッセージのみ表示します。') args = parser.parse_args() logger.debug(args) main(args, get_hash)
def __init__(self, **kwargs): """ Setup the App to start """ # Set 'service' instance from GMail API self.service = b.main()
to_insert = { "testkey1": 22, "testkey2": [2, 3], "testkey3": { "inner1": 2, "inner2": 'testval' } } coll.insert(to_insert, callback=self.count_handler) def count_handler(self, response): def callback(r): self.final_handler(response, r) coll = self.connection['testdb']['__ASYNCTEST3__'] coll.count(callback=callback) def final_handler(self, rec_id, response): msg = "You just inserted record " + str( rec_id) + '. There are now ' + str(response) + ' records.' self.write(msg) self.finish() if __name__ == "__main__": base.main(InsertHandler)
if os.path.isdir('.evental'): print( 'Evental Error: .evental must be a file instead of a directory.' ) os._exit() with open('.evental', 'r') as f: fileString = f.read() fps = fileString.strip().split(' ') for fp in fps: fp = fp.strip() executable = True if not fileExist(fp) or os.path.isdir(fp): print('Evental Error: File {} does not exist.' + fp) executable = False if executable: base.main(fps) else: if command == 'newProject' or command == 'newModule': print('Evental Error: Command ' + command + ' need a name') os._exit(0) else: helpHint() if argc == 3: command = argv[1] name = argv[2] if command == 'newProject': if fileExist(name): print( 'File/Directory already exists. Find another name for you project.' ) os._exit(0)
def main(controller_config=None, chip_key=None, channels=_default_channels,
         disabled_channels={None:_default_disabled_channels}.copy(),
         runtime=_default_runtime, target_rate=_default_target_rate,
         base_config=_default_config,
         disable_threshold=_default_disable_threshold,
         reset_threshold=_default_reset_threshold):
    """Automatic per-channel threshold/trim configuration.

    Loads a base config on every chip, then iteratively tunes the global
    threshold and per-channel pixel trims until each enabled channel's
    trigger rate is at or below `target_rate`; channels that cannot be
    tamed (rate above `disable_threshold` at maximum trim) are disabled,
    and a rate above `reset_threshold` triggers a full controller
    reset/reload.  Saves a timestamped config file per chip and returns
    the controller.

    NOTE(review): the `disabled_channels={...}.copy()` default is still a
    single shared dict across calls (copy() runs once at def time).
    NOTE(review): this body was reconstructed from whitespace-mangled
    source -- confirm block boundaries against the original file.
    """
    print('START AUTOCONFIG')
    # create controller
    c = base.main(controller_config=controller_config)
    print()
    print('base config', base_config)
    print('enabled channels', channels)
    print('disabled channels', disabled_channels)
    print('target rate', target_rate)
    print('disable threshold', disable_threshold)
    print('runtime', runtime)

    # Rate checks are done by pinging the network leaves (chips with no
    # downstream neighbours on miso_us).
    test_chip_keys = []
    for io_group in c.network:
        for io_channel in c.network[io_group]:
            test_chip_ids = [chip_id for chip_id, deg in c.network[io_group][io_channel]['miso_us'].out_degree() if deg == 0]  # get network leaves
            test_chip_keys += [larpix.Key(io_group, io_channel, chip_id) for chip_id in test_chip_ids]
    print('test packets will be sent to', test_chip_keys)
    read_config_spec = [(key, 0) for key in test_chip_keys]

    # Either configure every chip or just the one requested.
    chips_to_configure = c.chips
    if not chip_key is None:
        chips_to_configure = [chip_key]

    # Collect the channels to leave alone for each chip: the None / "All"
    # entries apply to every chip, plus any per-chip entry.
    _default_ignore = defaultdict(list)
    for chip_key in chips_to_configure:
        if None in disabled_channels:
            _default_ignore[chip_key] += disabled_channels[None]
        if "All" in disabled_channels:
            _default_ignore[chip_key] += disabled_channels["All"]
        if chip_key in disabled_channels:
            _default_ignore[chip_key] += disabled_channels[chip_key]
    channels_to_configure = defaultdict(list, [(chip_key, channels.copy()) for chip_key in chips_to_configure])

    print()
    for chip_key in chips_to_configure:
        c.io.double_send_packets = True
        print('set config', chip_key)
        c[chip_key].config.load(_default_config)
        for channel in channels:
            if channel not in _default_ignore[chip_key]:
                c[chip_key].config.channel_mask[channel] = 0
        for channel in _default_ignore[chip_key]:
            c[chip_key].config.csa_enable[channel] = 0
        # write configuration
        print('verify', chip_key)
        c.write_configuration(chip_key)
        base.flush_data(c)
        #ok, diff = c.verify_configuration(chip_key, timeout=0.1)
        ok, diff = c.enforce_configuration(chip_key, timeout=0.01, n=10, n_verify=10)
        if not ok:
            print('config error', diff[chip_key])
        base.flush_data(c)
        c.io.double_send_packets = True

        # verify no high rate channels: repeatedly disable the single
        # hottest channel until nothing exceeds target_rate
        repeat = True
        while repeat:
            print('check rate', chip_key, end=' ')
            repeat = False
            base.flush_data(c)
            c.multi_read_configuration(read_config_spec, timeout=runtime/10, message='rate check')
            triggered_channels = c.reads[-1].extract('chip_key', 'channel_id', chip_key=chip_key, packet_type=0)
            print('(total rate={}Hz)'.format(len(triggered_channels)/(runtime/10)))
            rates = dict([(channel, triggered_channels.count(list(channel))/(runtime/10)) for channel in set(map(tuple, triggered_channels))])
            if rates:
                max_rate = max(rates.values())
                for channel, rate in rates.items():
                    chip_key, channel = channel
                    if rate > target_rate and channel in channels_to_configure[chip_key] \
                            and chip_key in c.chips and rate == max_rate:
                        print('disable', chip_key, channel, 'rate was', rate, 'Hz')
                        c.disable(chip_key, [channel])
                        c[chip_key].config.csa_enable[channel] = 0
                        c.write_configuration(chip_key, 'csa_enable')
                        channels_to_configure[chip_key].remove(channel)
                        repeat = True
                    if rate > reset_threshold:
                        c = _reset_and_reload(c, controller_config)
                if repeat:
                    c.write_configuration(chip_key)
            c.reads = []

    # walk down global threshold
    print()
    print('reducing global threshold')
    repeat = defaultdict(lambda : True, [(key, True) for key in chips_to_configure])
    target_reached = False
    while any(repeat.values()) or not len(repeat.values()):
        # check rate
        print('check rate', end=' ')
        base.flush_data(c)
        c.multi_read_configuration(read_config_spec, timeout=runtime, message='rate check')
        triggered_channels = c.reads[-1].extract('chip_key', 'channel_id', packet_type=0)
        print('(total rate={}Hz)'.format(len(triggered_channels)/runtime))
        for chip_key, channel in set(map(tuple, triggered_channels)):
            rate = triggered_channels.count([chip_key, channel])/runtime
            if rate > target_rate and channel in channels_to_configure[chip_key] \
                    and repeat[chip_key] and chip_key in c.chips:
                # overshot: step the threshold back up by one and stop
                # lowering this chip
                print('reached target', chip_key, channel, 'rate was', rate, 'Hz')
                target_reached = True
                repeat[chip_key] = False
                c[chip_key].config.threshold_global = min(c[chip_key].config.threshold_global+1, 255)
                print('\tthreshold', c[chip_key].config.threshold_global)
                c.write_configuration(chip_key, 'threshold_global')
                c.write_configuration(chip_key, 'threshold_global')
            if rate > reset_threshold:
                c = _reset_and_reload(c, controller_config)
        # walk down global threshold
        if not target_reached:
            print('reducing thresholds')
            for chip_key in chips_to_configure:
                if chip_key in c.chips:
                    if repeat[chip_key] and c[chip_key].config.threshold_global > 0:
                        c[chip_key].config.threshold_global -= 1
                        repeat[chip_key] = True
                    elif c[chip_key].config.threshold_global == 0:
                        repeat[chip_key] = False
                    c.write_configuration(chip_key, 'threshold_global')
                    c.write_configuration(chip_key, 'threshold_global')
        target_reached = False
        c.reads = []
    print('initial global thresholds:', dict([(chip_key, c[chip_key].config.threshold_global) for chip_key in chips_to_configure if chip_key in c.chips]))

    # step the global threshold back up (at most 10 rounds) until rates
    # settle below target
    print()
    print('increasing global threshold')
    above_target = defaultdict(lambda : False)
    for _ in range(10):
        # check rate
        print('check rate', end=' ')
        base.flush_data(c)
        c.multi_read_configuration(read_config_spec, timeout=runtime, message='rate check')
        triggered_channels = c.reads[-1].extract('chip_key', 'channel_id', packet_type=0)
        print('(total rate={}Hz)'.format(len(triggered_channels)/runtime))
        for chip_key, channel in set(map(tuple, triggered_channels)):
            rate = triggered_channels.count([chip_key, channel])/runtime
            if rate > target_rate and channel in channels_to_configure[chip_key] \
                    and not above_target[chip_key] and chip_key in c.chips:
                print('increasing threshold', chip_key, channel, 'rate was', rate, 'Hz')
                above_target[chip_key] = True
                c[chip_key].config.threshold_global = min(c[chip_key].config.threshold_global+1, 255)
                print('\tthreshold', c[chip_key].config.threshold_global)
                c.write_configuration(chip_key, 'threshold_global')
                c.write_configuration(chip_key, 'threshold_global')
            if rate > reset_threshold:
                c = _reset_and_reload(c, controller_config)
        # continue once rate is below target
        if not above_target or not any(above_target.values()):
            break
        else:
            above_target = defaultdict(lambda : False)
        c.reads = []
    print('final global thresholds:', dict([(chip_key, c[chip_key].config.threshold_global) for chip_key in chips_to_configure if chip_key in c.chips]))

    # per-channel fine tuning: walk pixel trims down, same scheme as the
    # global threshold but keyed on (chip, channel)
    print()
    print('decreasing pixel trim')
    repeat = defaultdict(lambda : True, [((key, channel), True) for key, channels in channels_to_configure.items() for channel in channels])
    target_reached = False
    while any(repeat.values()) or not len(repeat.values()):
        # check rate
        print('check rate', end=' ')
        base.flush_data(c)
        c.multi_read_configuration(read_config_spec, timeout=runtime, message='rate check')
        triggered_channels = c.reads[-1].extract('chip_key', 'channel_id', packet_type=0)
        print('(total rate={}Hz)'.format(len(triggered_channels)/runtime))
        for chip_key, channel in set(map(tuple, triggered_channels)):
            rate = triggered_channels.count([chip_key, channel])/runtime
            if rate > target_rate and channel in channels_to_configure[chip_key] \
                    and chip_key in c.chips:
                print('reached target', chip_key, channel, 'rate was', rate, 'Hz')
                if repeat[(chip_key, channel)]:
                    target_reached = True
                if c[chip_key].config.pixel_trim_dac[channel] == 31 and rate > disable_threshold:
                    # maxed trim and still hot: give up on this channel
                    c.disable(chip_key, [channel])
                    c[chip_key].config.csa_enable[channel] = 0
                    c.write_configuration(chip_key, 'csa_enable')
                    print('disable threshold reached')
                repeat[(chip_key, channel)] = False
                c[chip_key].config.pixel_trim_dac[channel] = min(c[chip_key].config.pixel_trim_dac[channel]+1, 31)
                print('\ttrim', c[chip_key].config.pixel_trim_dac[channel])
                c.write_configuration(chip_key, 'pixel_trim_dac')
                c.write_configuration(chip_key, 'pixel_trim_dac')
            if rate > reset_threshold:
                c = _reset_and_reload(c, controller_config)
        # walk down trims
        if not target_reached:
            print('reducing trims')
            for chip_key, channels in channels_to_configure.items():
                if chip_key in c.chips:
                    for channel in channels:
                        if repeat[(chip_key, channel)] and c[chip_key].config.pixel_trim_dac[channel] > 0:
                            c[chip_key].config.pixel_trim_dac[channel] -= 1
                        elif c[chip_key].config.pixel_trim_dac[channel] == 0:
                            repeat[(chip_key, channel)] = False
                    c.write_configuration(chip_key, 'pixel_trim_dac')
                    c.write_configuration(chip_key, 'pixel_trim_dac')
        target_reached = False
        c.reads = []
    print('initial pixel trims:')
    for chip_key in chips_to_configure:
        if chip_key in c.chips:
            print('\t', chip_key, c[chip_key].config.pixel_trim_dac)

    # step pixel trims back up (at most 10 rounds) until rates settle
    print()
    print('increasing pixel trim')
    above_target = defaultdict(lambda : False)
    for _ in range(10):
        # check rate
        print('check rate', end=' ')
        base.flush_data(c)
        c.multi_read_configuration(read_config_spec, timeout=runtime, message='rate check')
        triggered_channels = c.reads[-1].extract('chip_key', 'channel_id', packet_type=0)
        print('(total rate={}Hz)'.format(len(triggered_channels)/runtime))
        for chip_key, channel in set(map(tuple, triggered_channels)):
            rate = triggered_channels.count([chip_key, channel])/runtime
            if rate > target_rate and channel in channels_to_configure[chip_key] \
                    and not above_target[(chip_key, channel)] and chip_key in c.chips:
                print('increasing pixel trim', chip_key, channel, 'rate was', rate, 'Hz')
                above_target[(chip_key, channel)] = True
                if c[chip_key].config.pixel_trim_dac[channel] == 31 and rate > disable_threshold:
                    c.disable(chip_key, [channel])
                    c[chip_key].config.csa_enable[channel] = 0
                    c.write_configuration(chip_key, 'csa_enable')
                    print('disable threshold reached')
                c[chip_key].config.pixel_trim_dac[channel] = min(c[chip_key].config.pixel_trim_dac[channel]+1, 31)
                print('\ttrim', c[chip_key].config.pixel_trim_dac[channel])
                c.write_configuration(chip_key, 'pixel_trim_dac')
                c.write_configuration(chip_key, 'pixel_trim_dac')
            if rate > reset_threshold:
                c = _reset_and_reload(c, controller_config)
        # continue once rate is below target
        if not above_target or not any(above_target.values()):
            break
        else:
            above_target = defaultdict(lambda : False)
        c.reads = []
    print('final pixel trims:')
    for chip_key in chips_to_configure:
        if chip_key in c.chips:
            print('\t', chip_key, c[chip_key].config.pixel_trim_dac)

    print()
    print('saving configs...')
    for chip_key in chips_to_configure:
        if chip_key in c.chips:
            # save config
            time_format = time.strftime('%Y_%m_%d_%H_%M_%S_%Z')
            config_filename = 'config-'+str(chip_key)+'-'+time_format+'.json'
            c[chip_key].config.write(config_filename, force=True)
            print('\t', chip_key, 'saved to', config_filename)

    print('final configured rate: ', end='')
    base.flush_data(c)
    c.run(runtime, 'final rate')
    n_packets = len(c.reads[-1].extract('io_group', packet_type=0))
    print('{:0.2f}Hz ({:0.2f}Hz/channel)'.format(n_packets/runtime, n_packets/runtime/sum([len(ch) for ch in channels_to_configure.values()])))
    print('END AUTOCONFIG')
    return c
create_new_objects) com7 = create_comment(ice_userprofile, 'this is com7', com1, create_new_objects) com8 = create_comment(ice_userprofile, 'this is com8', com2, create_new_objects) # make a client available to use client = Client() # make paginator available to use paginator = Paginator(NewsItem.objects.all(), 15) if __name__ == '__main__': base.main(setup_path_and_args, init_db, usage) # import all of this so I can use it easily with IPython from news.models import UserProfile, NewsItem, Comment, \ Rated, Rankable from django.contrib.auth.models import User from news.helpers import get_child_comments, get_next_with_pages, \ get_pagenum, get_paginator_page, datetime_ago, improve_url, assert_or_404 from news.shortcuts import get_object_or_404, \ get_from_POST_or_404, get_from_session from news.validation import valid_comment_text, valid_email, \ valid_next_redirect, valid_password, valid_text, \ valid_title, valid_url, valid_username from django.contrib.sessions.backends.db import SessionStore from django.contrib.sessions.models import Session
def test_jump(self):
    """Set three bookmarks (lines 0, 2, 4) and verify Alt+PgDown/PgUp jump
    the cursor between them, clamping at the last/first mark."""
    self.qpart.text = '\n' * 5
    # toggle a bookmark on lines 0, 2 and 4 with Ctrl+B
    QTest.keyClick(self.qpart, Qt.Key_B, Qt.ControlModifier)
    for i in range(2):
        QTest.keyClick(self.qpart, Qt.Key_Down)
    QTest.keyClick(self.qpart, Qt.Key_B, Qt.ControlModifier)
    for i in range(2):
        QTest.keyClick(self.qpart, Qt.Key_Down)
    QTest.keyClick(self.qpart, Qt.Key_B, Qt.ControlModifier)
    self.assertEqual(self._markedBlocks(), [0, 2, 4])
    self.qpart.cursorPosition = (0, 0)
    # jump forward: 0 -> 2 -> 4, then stays at 4 (no mark past the last)
    QTest.keyClick(self.qpart, Qt.Key_PageDown, Qt.AltModifier)
    self.assertEqual(self.qpart.cursorPosition[0], 2)
    QTest.keyClick(self.qpart, Qt.Key_PageDown, Qt.AltModifier)
    self.assertEqual(self.qpart.cursorPosition[0], 4)
    QTest.keyClick(self.qpart, Qt.Key_PageDown, Qt.AltModifier)
    self.assertEqual(self.qpart.cursorPosition[0], 4)
    # jump backward: 4 -> 2 -> 0, then stays at 0
    QTest.keyClick(self.qpart, Qt.Key_PageUp, Qt.AltModifier)
    self.assertEqual(self.qpart.cursorPosition[0], 2)
    QTest.keyClick(self.qpart, Qt.Key_PageUp, Qt.AltModifier)
    self.assertEqual(self.qpart.cursorPosition[0], 0)
    QTest.keyClick(self.qpart, Qt.Key_PageUp, Qt.AltModifier)
    self.assertEqual(self.qpart.cursorPosition[0], 0)


if __name__ == '__main__':
    base.main()
import json

import tornado.web

import apymongo
from apymongo import json_util

import base


class FindHandler(tornado.web.RequestHandler):
    """Returns all records in the testdb.testcollection collection.

    Notice the use of the "loop" method, which drives the async cursor
    until the full result set is available.
    """

    @tornado.web.asynchronous
    def get(self):
        """Begin an async find over the whole collection."""
        connection = apymongo.Connection()
        collection = connection['testdb']['testcollection']
        collection.find(callback=self.handle).loop()

    def handle(self, response):
        """Emit the accumulated result set as JSON and finish."""
        body = json.dumps(response, default=json_util.default)
        self.write(body)
        self.finish()


if __name__ == "__main__":
    base.main(FindHandler)
self.debug_reports(r, uhdev, events) self.assertInputEvents(expected, events) r = uhdev.event(1, -2, wheels=(0, 1)) expected.append(libevdev.InputEvent(libevdev.EV_REL.REL_HWHEEL, 1)) events = uhdev.next_sync_events() self.debug_reports(r, uhdev, events) self.assertInputEvents(expected, events) class TestMiMouse(TestWheelMouse): def create_device(self): return MIDongleMIWirelessMouse() def assertInputEvents(self, expected_events, effective_events): # Buttons and x/y are spread over two HID reports, so we can get two # event frames for this device. remaining = self.assertInputEventsIn(expected_events, effective_events) try: remaining.remove(libevdev.InputEvent(libevdev.EV_SYN.SYN_REPORT, 0)) except ValueError: # If there's no SYN_REPORT in the list, continue and let the # assert below print out the real error pass self.assertEqual(remaining, []) if __name__ == "__main__": main(sys.argv[1:])
coll = self.connection['testdb']['testcollection'] to_insert = {"testkey1":22, "testkey2":[2,3], "testkey3":{"inner1":2, "inner2":'testval'}} coll.insert(to_insert,callback=self.count_handler) def count_handler(self,response): def callback(r): self.final_handler(response,r) coll = self.connection['testdb']['testcollection'] coll.count(callback = callback) def final_handler(self,rec_id, response): msg = "You just inserted record " + str(rec_id) + '. There are now ' + str(response) + ' records.' self.write(msg) self.finish() if __name__ == "__main__": base.main(InsertHandler)
current_ids.append(current_inverted_list[current_index]) # 然后,如果 current_ids 的所有元素都一样,那么表明这个单词在这个元素对应的文档中都出现了 if all(x == current_ids[0] for x in current_ids): result.append(current_ids[0]) query_words_index = [x + 1 for x in query_words_index] continue # 如果不是,我们就把最小的元素加一 min_val = min(current_ids) min_val_pos = current_ids.index(min_val) query_words_index[min_val_pos] += 1 @staticmethod def parse_text_to_words(text): # 使用正则表达式去除标点符号和换行符 text = re.sub(r'[^\w ]', ' ', text) # 转为小写 text = text.lower() # 生成所有单词的列表 word_list = text.split(' ') # 去除空白单词 word_list = filter(None, word_list) # 返回单词的 set return set(word_list) if __name__ == "__main__": search_engine = BOWInvertedIndexEngine() main(search_engine)
def tidify(target):
    """Round `target` down one step toward a "tidy" number (digits
    non-decreasing left to right).

    Scans the digits right to left; whenever a digit is smaller than its
    left neighbour, subtracts (suffix + 1) so the offending suffix becomes
    all nines, then keeps scanning.

    Fix: previously fell off with a bare `return` (None) for single-digit
    inputs; a single digit is already tidy, so return it unchanged.
    """
    nums = [int(s) for s in str(target)]
    if len(nums) < 2:
        return target  # single digits are already tidy
    i = len(nums) - 1
    while i > 0:
        if nums[i] < nums[i - 1]:
            # turn the suffix nums[i:] into all nines
            target -= int(''.join([str(n) for n in nums[i:]])) + 1
            nums = [int(s) for s in str(target)]
        i -= 1
    return target


def solve(problem):
    """Return the largest tidy number <= the target, or 0 if none exists.

    @problem: the parsed problem input; problem[0] is the target value
    (the original docstring described a list-of-lists layout -- the code
    treats problem[0] directly as the number, left unchanged).
    @return: the solution to be printed

    Relies on is_tidy() defined elsewhere in this file.
    """
    target = problem[0]
    while target > 0:
        if is_tidy(target):
            return target
        target = tidify(target)
    return 0


if __name__ == "__main__":
    base.main(solve)
# # # This file is part of the program relax (http://www.nmr-relax.com). # # # # This program is free software: you can redistribute it and/or modify # # it under the terms of the GNU General Public License as published by # # the Free Software Foundation, either version 3 of the License, or # # (at your option) any later version. # # # # This program is distributed in the hope that it will be useful, # # but WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # # GNU General Public License for more details. # # # # You should have received a copy of the GNU General Public License # # along with this program. If not, see <http://www.gnu.org/licenses/>. # # # ############################################################################### # relax module imports. from base import cluster, main, NUM_SPINS_CLUSTER, NUM_SPINS_SINGLE, single, Profile from lib.dispersion.variables import EXP_TYPE_R1RHO, MODEL_NS_R1RHO_3SITE_LINEAR # Setup. SINGLE = Profile(exp_type=[EXP_TYPE_R1RHO], num_spins=NUM_SPINS_SINGLE, model=MODEL_NS_R1RHO_3SITE_LINEAR, r2=5.0, dw_AB=1.0, dw_BC=2.0, pA=0.8, kex_AB=5000.0, pB=0.1, kex_BC=3000.0, spins_params=['r2', 'dw_AB', 'dw_BC', 'pA', 'kex_AB', 'pB', 'kex_BC']) CLUSTER = Profile(exp_type=[EXP_TYPE_R1RHO], num_spins=NUM_SPINS_CLUSTER, model=MODEL_NS_R1RHO_3SITE_LINEAR, r2=5.0, dw_AB=1.0, dw_BC=2.0, pA=0.8, kex_AB=5000.0, pB=0.1, kex_BC=3000.0, spins_params=['r2', 'dw_AB', 'dw_BC', 'pA', 'kex_AB', 'pB', 'kex_BC']) # Execute main function. if __name__ == "__main__": main()
# update least recently ranked rankable old_rankable = Rankable.objects.all().order_by('last_ranked_date')[0] do_update(old_rankable) # update random top 50 frontpage rankable do_random_update_from_query(get_frontpage_querymanager()) # this is needed to Django doesn't hog memory if we are running under # DEBUG = True db.reset_queries() except KeyboardInterrupt: # exit gracefully on ctrl-c if not daemonize: print >>sys.stderr, "" sys.exit(0) except: # Print stack trace and just keep going for any other type of exception if not daemonize: print >>sys.stderr, "\nException in update_ranking.py:" print >>sys.stderr, '-'*60 traceback.print_exc(file=sys.stderr) print >>sys.stderr, '-'*60 if __name__ == '__main__': base.main(setup_path_and_args, update_ranking, usage)