def make_control(argv): # return a Bunch print argv if len(argv) not in (2, 3): usage('invalid number of arguments') pcl = ParseCommandLine.ParseCommandLine(argv) arg = Bunch.Bunch( base_name=argv[0].split('.')[0], test=pcl.has_arg('--test'), ) random_seed = 123456 random.seed(random_seed) path = Path.Path() # use the default dir_input debug = False out_file_base = ( arg.base_name + ('-test' if arg.test else '') ) return Bunch.Bunch( arg=arg, debug=debug, max_sale_price=85e6, # according to Wall Street Journal path_in=path.dir_working() + 'samples-train.csv', path_out_txt=path.dir_working() + out_file_base + '.csv', path_out_pickle=path.dir_working() + out_file_base + '.pickle', random_seed=random_seed, test=arg.test, )
def get_TSCV():
    """Generate TSCV subtable offsets.

    Creates a Bunch, indexed by table name, where each entry is another
    Bunch containing the subtable name, table length, total length
    (inc RPC header), and offset.
    """
    tscv = Bunch.Bunch()
    for tup in _TSCV_tables:
        tscv[tup[0]] = Bunch.Bunch(name=tup[0], tbllen=tup[2],
                                   totlen=tup[2] + TSC_rpc_header_size,
                                   offset=tup[1], comment=tup[3])

    # Write CIAX offsets.  Iterate the ranges directly instead of first
    # materializing throwaway lists (the old LIST1/LIST2 comprehensions).
    for AA1 in xrange(1, 9):
        for AA2 in xrange(1, 65):
            AAA = "%02d%02d" % (AA1, AA2)
            BBBBB = "1%1d%02d00" % (AA1, AA2)
            for tup in _CIAX_tables:
                # tup[0] is a regex pattern selecting which AAA codes
                # this CIAX table entry applies to.
                if re.match(tup[0], AAA):
                    subtable = "TSCV%4.4s" % AAA
                    tscv[subtable] = Bunch.Bunch(
                        name=subtable, tbllen=tup[1],
                        totlen=tup[1] + TSC_rpc_header_size,
                        offset=int(BBBBB), comment=tup[2])
    return tscv
def __init__(self, logger):
    """Set up the command/PARA parsing machinery.

    logger  logger used for diagnostic messages (also passed down to
            the PARA lexer and parser)
    """
    self.logger = logger
    self.parser = CommandParser.CommandParser()
    # The lexer is constructed first because the parser wraps it.
    self.para_lexer = para_lexer.paraScanner(logger=logger, debug=0)
    self.para_parser = paraParser(self.para_lexer, logger=logger, debug=0)
    # Thread-safe tables: parsed PARA definitions plus the user/system/
    # command register maps.
    self.para = Bunch.threadSafeBunch()
    self.userRegMap = Bunch.threadSafeBunch()
    self.systemRegMap = Bunch.threadSafeBunch()
    self.commandRegMap = Bunch.threadSafeBunch()
def get_SOSS_offsets():
    """Generate SOSS subtable offsets.

    Creates a Bunch, indexed by table name, where each entry is another
    Bunch containing the subtable name and offset.
    """
    offsets = Bunch.Bunch()
    for entry in _SOSS_subtables:
        name = entry[0]
        offsets[name] = Bunch.Bunch(name=name, table=None,
                                    offset=entry[1])
    return offsets
def get_TSCS_offsets():
    """Generate TSCS subtable offsets.

    Creates a Bunch, indexed by table name, where each entry is another
    Bunch containing the subtable name and offset.
    """
    offsets = Bunch.Bunch()
    for entry in _TSCS_subtables:
        name = entry[0]
        # Offset is adjusted past the RPC header (TSC_rpc_header_size - 1).
        offsets[name] = Bunch.Bunch(name=name, table='TSCS',
                                    offset=entry[1] + TSC_rpc_header_size - 1)
    return offsets
def get_TSCV_offsets():
    """Another view of TSCV subtable offsets, compatible with the views
    for TSCL and TSCS.
    """
    offsets = Bunch.Bunch()
    for entry in get_TSCV().values():
        # Re-key by subtable name, applying the same header adjustment
        # used by the TSCS view.
        offsets[entry.name] = Bunch.Bunch(
            name=entry.name, table='TSCV',
            offset=entry.offset + TSC_rpc_header_size - 1)
    return offsets
def __init__(self, name, logger, ev_quit=None, threadPool=None,
             numthreads=10):
    """
    Constructor for the PubSubBase class.
        name        pubsub name
        logger      logger to be used for any diagnostic messages
        ev_quit     optional termination event; created if not supplied
        threadPool  optional, threadPool for serving PubSub activities
        numthreads  if a threadPool is NOT furnished, the number of
                      threads to allocate
    """
    super(PubSubBase, self).__init__()

    self.logger = logger
    self.name = name
    self.numthreads = numthreads

    # Handles to subscriber remote proxies
    self._partner = Bunch.threadSafeBunch()

    # Defines aggregate channels
    self.aggregates = Bunch.threadSafeBunch()

    # Termination event
    if not ev_quit:
        ev_quit = threading.Event()
    self.ev_quit = ev_quit

    # If we were passed in a thread pool, then use it.  If not,
    # make one.  Record whether we made our own or not.
    # (Use an identity test, not '!= None', which would invoke __eq__
    # on the pool object.)
    if threadPool is not None:
        self.threadPool = threadPool
        self.mythreadpool = False
    else:
        self.threadPool = Task.ThreadPool(logger=self.logger,
                                          ev_quit=self.ev_quit,
                                          numthreads=self.numthreads)
        self.mythreadpool = True

    # For task inheritance:
    self.tag = 'PubSub'
    self.shares = ['logger', 'threadPool']

    # For handling subscriber info
    self._lock = threading.RLock()
    self._sub_info = {}

    # number of seconds to wait before unsubscribing a subscriber
    # who is unresponsive
    self.failure_limit = 60.0
def p_error(arg):
    """Record a parse error reported by the parser.

    Appends a Bunch describing the error to the module-level errinfo
    list and bumps the module-level error count.
    """
    global errors, errinfo
    errors += 1
    if isinstance(arg, LexToken):
        # Error at a concrete token: report its line, type and value.
        errstr = ("Parse error at line %d, token %s ('%s')"
                  % (arg.lineno, arg.type, str(arg.value)))
        lineno = arg.lineno
    else:
        # Non-token error object; no line information available.
        errstr = ("Parse error: %s" % str(arg))
        lineno = 0
    errinfo.append(Bunch.Bunch(lineno=lineno, errstr=errstr, token=arg))
def make_robunch(name, hostports=None, auth=None, secure=default_secure,
                 ns=None):
    """Creates a bunch with handles to all of the individual services
    running on each host, plus a remoteObjectSP handle to all hosts.
    If the hostport list is not given then the hostport list is queried
    from the local name server.
    """
    if not hostports:
        # No explicit list given: ask a name server (preferring the one
        # passed in, falling back to the module default).
        if ns:
            hostports = ns.getHosts(name)
        elif default_ns:
            hostports = default_ns.getHosts(name)
        else:
            # TODO: raise an exception?
            hostports = []

    sp = servicePack()
    bunch = Bunch.Bunch()
    for (host, port) in hostports:
        host = socket.getfqdn(host)
        client = remoteObjectClient(host=host, port=port,
                                    name=('%s(%s)' % (name, host)),
                                    auth=auth, secure=secure)
        bunch['%s:%d' % (host, port)] = client
        sp.add(client)

    # Aggregate handle over the whole service pack.
    bunch['all'] = remoteObjectSPAll('%s(all)' % (name), svcpack=sp)
    return bunch
def __init__(self, logger, env, ev_quit=None):
    """Set up the HSC instrument object.

    logger   logger for diagnostic messages
    env      environment object, stored as-is
    ev_quit  optional threading.Event used to signal shutdown; a new
             one is created if not supplied
    """
    super(HSC, self).__init__()
    self.logger = logger
    self.env = env
    # Convoluted but sure way of getting this module's directory
    self.mydir = os.path.split(sys.modules[__name__].__file__)[0]

    if not ev_quit:
        self.ev_quit = threading.Event()
    else:
        self.ev_quit = ev_quit

    # Holds our link to OCS delegate object
    self.ocs = None

    # We define our own modes that we report through status
    # to the OCS
    self.mode = 'default'

    # Thread-safe bunch for storing parameters read/written
    # by threads executing in this object
    self.param = Bunch.threadSafeBunch()

    # Interval between status packets (secs)
    self.param.status_interval = 10.0
def get_vars(plist, include_dirs):
    """Build substitution dictionary from the <Parameter_List> section of
    an OPE file.

    plist         text of the parameter-list section
    include_dirs  directories searched by prepend_prm for included files

    Returns a case-insensitive dict mapping VAR -> value.
    """
    lines = plist.split('\n')
    substDict = Bunch.caselessDict()
    # A while/pop loop (not a for) is required: prepend_prm mutates
    # `lines` in place, presumably splicing included-file content onto
    # the front of the list -- TODO confirm against prepend_prm.
    while len(lines) > 0:
        line = lines.pop(0)
        line = line.strip()
        match = load_regex.match(line)
        if match:
            # Include directive: pull in the referenced file's lines.
            prepend_prm(lines, match.group(1), include_dirs)
            continue
        # convert to uc
        line = toupper(line)
        # Skip comment lines ('#' or '*') and blank lines.
        if line.startswith('#') or line.startswith('*') or (len(line) == 0):
            continue
        if '=' in line:
            # VAR=VALUE assignment; split on the first '=' only, so the
            # value may itself contain '=' characters.
            idx = line.find('=')
            var = line[0:idx].strip()
            val = line[idx+1:].strip()
            substDict[var] = val
    return substDict
def make_control(argv):
    """Return a Bunch of controls parsed from the command line."""
    eprint('argv', argv)

    parser = argparse.ArgumentParser()
    parser.add_argument('--i', help='file to read, if not stdin')
    parser.add_argument('--o', help='path to output file, if not stdout')
    parser.add_argument('--unittest', action='store_true',
                        help='run unit tests and exit')
    parser.add_argument(
        '--test', action='store_true',
        help='if present, truncated input and enable test code')
    parser.add_argument('--trace', action='store_true',
                        help='if present, call pdb.set_trace() early in run')

    arg = parser.parse_args(argv[1:])  # ignore invocation name
    arg.me = parser.prog.split('.')[0]

    if arg.trace:
        pdb.set_trace()

    return Bunch.Bunch(
        arg=arg,
        test=arg.test,
    )
def get_hosts(insname, nshost): import xmlrpclib try: # Query the name server on the Gen2 host for the service # names of the instrument and the status subsystems proxy = xmlrpclib.ServerProxy('http://%s:7075/' % nshost) insint_hosts = proxy.getHosts(insname) if len(insint_hosts) == 0: raise OCSintError("No instrument interface found") status_hosts = proxy.getHosts('status') if len(status_hosts) == 0: raise OCSintError("No status interface found") # Strip off FQDN to short name cmds = insint_hosts[0][0].split('.')[0] sdst = status_hosts[0][0].split('.')[0] d = Bunch.Bunch(obshost=cmds, gethost=cmds, obchost=cmds, stathost=sdst) return d except Exception, e: raise OCSintError("Couldn't configure interfaces: %s" % str(e))
def _get_params(self, ast_params):
    """Extract parameter information from a 'param_list' AST node.

    Returns a Bunch with:
      paramList  - parameter names in source order
      paramDict  - name -> literal value (None for status-alias params)
      statusDict - status alias -> '##NODATA##' placeholder
      aliasList  - (varname, statusAlias) pairs for alias parameters
    """
    # NOTE(review): these asserts pass an exception *instance* as the
    # assert message, so what actually propagates is an AssertionError
    # (not SkCompileError), and the check disappears under -O.
    assert ast_params.tag == 'param_list', \
        SkCompileError("Malformed parameter AST: %s" % str(ast_params))

    paramList = []
    paramDict = {}
    statusDict = {}
    aliasList = []
    # The result Bunch shares the mutable containers above, so filling
    # them in below fills in the result as well.
    res = Bunch.Bunch(paramList=paramList, paramDict=paramDict,
                      statusDict=statusDict, aliasList=aliasList)

    for ast_kvp in ast_params.items:
        assert (ast_kvp.tag == 'key_value_pair') and (
            len(ast_kvp.items) == 2), \
            SkCompileError("Malformed key value pair AST: %s" % (
                str(ast_kvp)))

        # If this item is a status alias, add it to the dict of status
        # values that will need to be fetched
        (varname, val_ast) = ast_kvp.items
        if self.is_aliasref(val_ast):
            statusAlias = val_ast.items[0]
            statusDict[statusAlias] = '##NODATA##'
            value = None
            aliasList.append((varname, statusAlias))
        else:
            value = self.get_value(val_ast)

        paramList.append(varname)
        paramDict[varname] = value

    return res
def add_status(self, name, width, alias, label):
    """Add a labelled status entry field at the current table position
    and register it under the lower-cased _name_."""
    lbl = gtk.Label(label)
    lbl.show()
    self.table.attach(lbl, self.col, self.col + 1,
                      self.row - 1, self.row,
                      xoptions=gtk.FILL, yoptions=gtk.FILL,
                      xpadding=1, ypadding=1)

    entry = gtk.Entry()
    entry.set_width_chars(width)
    entry.set_text(alias)
    entry.show()
    self.table.attach(entry, self.col, self.col + 1,
                      self.row, self.row + 1,
                      xoptions=gtk.FILL, yoptions=gtk.FILL,
                      xpadding=1, ypadding=1)
    self.bump_col()

    key = name.lower()
    self.params[key] = Bunch.Bunch(widget=entry, alias=alias, type='field')
    self.addParam(key)
def addvarrefs(lineno, line):
    """Scan _line_ for variable references, recording each one in the
    module-level reference lists and flagging undefined references."""
    offset = 0
    match = regex_varref.match(line)
    while match:
        pfx, varref, sfx = match.groups()
        # Absolute character positions of this reference in the original
        # line: offsets accumulate across successive matches on sfx.
        start = offset + len(pfx)
        end = start + len(varref)
        offset = end
        varref = varref.upper()[1:]   # normalize case, drop leading char
        refset.add(varref)
        bnch = Bunch.Bunch(varref=varref, lineno=lineno, text=line,
                           start=start, end=end)
        reflist.append(bnch)
        try:
            varDict[varref]
        except KeyError:
            # Reference has no definition -- record it as bad.
            badset.add(varref)
            badlist.append(bnch)
        # Keep scanning the remainder of the line.
        match = regex_varref.match(sfx)
def add_hook(self, name, cbfn, args=None, kwdargs=None):
    """Register callback _cbfn_ under hook _name_ with optional
    positional and keyword arguments (defaulting to empty)."""
    hook = Bunch.Bunch(
        cbfn=cbfn,
        args=args if args is not None else [],
        kwdargs=kwdargs if kwdargs is not None else {})
    self.hooks[name].append(hook)
def getFramesByDate(fromdate, todate, no_time_xfer=False,
                    no_time_saved=False, no_time_hilo=False,
                    skip_cancel=False):
    """Get all the frames that were allocated between _fromdate_ and
    _todate_, which should be datetime objects.  Returns a list of
    bunches (records) sorted by time.

    NOTE(review): the no_time_* and skip_cancel parameters are accepted
    but unused in this body; the numbered print statements look like
    leftover debug output.
    """
    with locks.frame:
        try:
            print "1"
            # Get the records from the Frame table matching selected dates
            fr_recs = Frame.query.filter(Frame.time_alloc >= fromdate).filter(
                Frame.time_alloc <= todate)
            print "2"
            # Order by time of allocation
            fr_recs = fr_recs.order_by('time_alloc').all()

            res = []
            # For each frame
            for fr_rec in fr_recs:
                # One output record per frame; transfer-related times
                # start as None and are filled in from the transfer
                # records below.
                b = Bunch.Bunch(frameid=str(fr_rec),
                                time_alloc=fr_rec.time_alloc,
                                time_xfer=None, time_saved=None,
                                time_hilo=None, time_stars=None)
                res.append(b)
                print "3"
                # Get the transfer records corresponding to this frame
                tr_recs = FrameTransfer.query.filter(
                    FrameTransfer.frame == fr_rec)
                tr_recs = tr_recs.order_by('time_done').all()

                # Iterate over the transfer records, looking for special ones
                for rec in tr_recs:
                    trtype = rec.xfer_type
                    if not trtype:
                        continue
                    trtype = trtype.strip()
                    if trtype == 'inst->gen2':
                        b.setvals(time_xfer=rec.time_start,
                                  time_saved=rec.time_done,
                                  time_hilo=rec.time_done)
                    elif trtype == 'gen2->stars':
                        print "found stars"
                        b.setvals(time_stars=rec.time_done)

            # TODO: "expunge" temp records from session?
            print "4"
            return res

        except dbError, e:
            # Translate low-level db errors into the package error type.
            raise g2dbError(e)
def __init__(self, frame, name, title, execfn):
    """Build a launcher panel packed into _frame_.

    frame   container widget the launcher table is packed into
    name    launcher name (stored via signature only; unused here)
    title   label shown on the execute button
    execfn  callable stored for use by self.execute() -- presumably
            invoked when the button is clicked; confirm in execute()
    """
    self.frame = frame
    # Parameter registry: name -> Bunch, plus insertion order.
    self.params = Bunch.Bunch()
    self.paramList = []
    # Current grid position for added widgets.
    self.row = 1
    self.col = 1
    self.max_col = self.col
    self.btn_width = 20
    self.execfn = execfn

    self.table = gtk.Table(rows=2, columns=2)
    self.table.set_name('launcher')
    self.table.show()

    # Execute button in the top-left cell; clicking runs self.execute().
    self.btn_exec = gtk.Button(title)
    self.btn_exec.set_size_request(default_width, -1)
    self.btn_exec.connect("clicked", lambda w: self.execute())
    self.btn_exec.show()
    self.table.attach(self.btn_exec, 0, 1, 1, 2,
                      xoptions=gtk.FILL, yoptions=gtk.FILL,
                      xpadding=1, ypadding=1)

    frame.pack_start(self.table, expand=False, fill=True)
def mkProp(self, proposalid, starttime=None, endtime=None,
           instruments=None, purpose='none', oplevel=None):
    """Facility for cranking out proposal db records.

    proposalid   identifier stored in the record
    starttime,
    endtime      epoch seconds; default to the current time
    instruments,
    oplevel      lists; default to a fresh empty list per call
    purpose      free-form string, default 'none'

    Returns a Bunch representing the proposal record.
    """
    now = time.time()
    if not starttime:
        starttime = now
    if not endtime:
        endtime = now
    # Use None sentinels instead of mutable '[]' defaults: the old
    # defaults were single shared lists, so every record created without
    # explicit arguments aliased (and could corrupt) the same list.
    if instruments is None:
        instruments = []
    if oplevel is None:
        oplevel = []
    bnch = Bunch.Bunch(createtime=now, updatetime=now,
                       proposalid=proposalid,
                       starttime=starttime, endtime=endtime,
                       instruments=instruments, purpose=purpose,
                       oplevel=oplevel)
    return bnch
def _subscribe(self, subscriber, proxy_obj, channels, options):
    """Add a subscriber (named by _subscriber_ and accessible via
    object _proxy_obj_) to channel (or list of channels) _channels_
    and with options _options_.

    This is an internal method.  See class 'PubSub' and its method
    subscribe() for a public interface.
    """
    channels = set(channels)

    can_unsubscribe = True
    if isinstance(options, dict):
        # Does subscriber allow us to unsubscribe them if they are
        # unreachable?  Default=True
        # ('in' instead of has_key: has_key was removed in Python 3.)
        if 'unsub' in options:
            can_unsubscribe = options['unsub']

    # 'with' replaces the manual acquire/try/finally and still
    # guarantees the lock is released on any exit path.
    with self._lock:
        # Record proxy in _partner table
        self._partner[subscriber] = Bunch.Bunch(
            proxy=proxy_obj, time_failure=None,
            can_unsubscribe=can_unsubscribe)

        for channel in channels:
            bunch = self._get_channelInfo(channel, create=True)
            bunch.subscribers.add(subscriber)

        # Compute subscriber relationships
        self.compute_subscribers()
def __init__(self, statusObj, logger, monitor, monchannels=['status'],
             checkptfile='status.cpt'):
    """Set up the status-handling object.

    statusObj    source status object; also supplies the statusInfo
    logger       logger for diagnostic messages
    monitor      monitor whose thread pool we share
    monchannels  monitor channels to use
                 NOTE(review): mutable default list -- shared across
                 instances if ever mutated; confirm it is read-only
    checkptfile  path of the checkpoint file
    """
    self.logger = logger
    self.monitor = monitor
    self.monchannels = monchannels
    self.statusObj = statusObj
    self.checkptfile = checkptfile

    # We'll share our monitor's thread pool
    self.threadPool = self.monitor.get_threadPool()
    # Needed for starting our own tasks on it
    self.tag = 'status'
    self.shares = ['logger', 'threadPool']

    # This holds the current decoded status values
    self.g2status = Bunch.threadSafeBunch()
    # Lock for mutual exclusion
    self._lock = self.g2status.getlock()

    # For looking up information about tables, aliases, etc.
    # share statusInfo object
    self.statusInfo = self.statusObj.get_statusInfo()

    # For deriving status
    self.derive = Derive.statusDeriver(self, self.logger)

    # Tables to ignore, can be set dynamically
    self.ignoreTables = set([])
def make_control(argv): 'return a Bunch' print argv parser = argparse.ArgumentParser() parser.add_argument('invocation') parser.add_argument('training_data', choices=arg_type.training_data_choices) parser.add_argument('neighborhood', type=arg_type.neighborhood) parser.add_argument('model', choices=['en', 'gb', 'rf']) parser.add_argument('n_processes', type=int) parser.add_argument('--test', action='store_true') parser.add_argument('--trace', action='store_true') parser.add_argument('--year', type=arg_type.year) arg = parser.parse_args(argv) arg.me = arg.invocation.split('.')[0] if arg.trace: pdb.set_trace() dir_working = Path.Path().dir_working() path_out_dir = (os.path.join(dir_working, arg.me + '-test') if arg.test else os.path.join(dir_working, arg.me)) dirutility.assure_exists(path_out_dir) return Bunch.Bunch( arg=arg, path_out_log=os.path.join(path_out_dir, '0log.txt'), timer=Timer.Timer(), )
class remoteObjectManagerService(ro.remoteObjectServer):
    """Remote object server that tracks managed processes (self.map)
    and their load-average info (self.myldavg)."""

    def __init__(self, name='', host=None, port=None, usethread=False,
                 logger=None, stdout=None, ev_quit=None, authDict=None,
                 secure=ro.default_secure, cert_file=ro.default_cert,
                 threadPool=None):
        """Set up the manager service.

        stdout  optional path of a file to append default output to;
                falls back to sys.stdout, then /dev/null on IOError.
        Other parameters are passed through to ro.remoteObjectServer.
        """
        # Logger for logging debug/error messages
        if not logger:
            self.logger = ro.nullLogger()
        else:
            self.logger = logger

        # Choose the default output stream, with graceful fallback.
        try:
            if stdout:
                self.logger.info("Using '%s' for default output" % stdout)
                self.stdout = open(stdout, "a")
            else:
                #self.stdout = open("/dev/null", "a")
                self.logger.info("Using STDOUT for default output")
                self.stdout = sys.stdout
        except IOError, e:
            # Could not open the requested file; discard output instead.
            self.logger.info("Using /dev/null for default output")
            self.stdout = open("/dev/null", "a")

        if not ev_quit:
            self.ev_quit = threading.Event()
        else:
            self.ev_quit = ev_quit

        # will set self.name, etc.
        ro.remoteObjectServer.__init__(self, name=name, host=host,
                                       port=port, logger=self.logger,
                                       usethread=usethread,
                                       ev_quit=self.ev_quit,
                                       authDict=authDict,
                                       secure=secure, cert_file=cert_file,
                                       threadPool=threadPool)

        # Map of managed processes, guarded by self.lock.
        self.map = {}
        self.lock = threading.RLock()
        # maximum seconds to sleep, is subdivided by a count of processes monitored
        self.sleepquantum = 1.0
        self.count = 0

        # Table of current load average info
        self.myldavg = Bunch.threadSafeBunch()

        self.starttime = time.time()
def make_control(argv): print 'argv', argv parser = argparse.ArgumentParser() parser.add_argument('ticker') parser.add_argument('--test', action='store_true') parser.add_argument('--trace', action='store_true') arg = parser.parse_args(argv[1:]) # ignore invocation name arg.me = 'bds' if arg.trace: pdb.set_trace() random_seed = 123 random.seed(random_seed) # put all output in directory path_out_dir = dirutility.assure_exists('../data/working/' + arg.me + '/' + arg.ticker + '/') return Bunch.Bunch( arg=arg, path_in_dir= '../data/input/7chord_team_folder/NYU/7chord_ticker_universe_nyu_poc/', path_in_glob='*.csv', path_out_dir=path_out_dir, path_out_log=path_out_dir + '0log.txt', path_out_report_ticker_maturity_template=path_out_dir + '%s.txt', path_out_ticker_maturity_template_csv=path_out_dir + '%s.csv', path_out_report_counts=path_out_dir + '0counts.txt', path_out_report_na=path_out_dir + '0na.txt', random_seed=random_seed, test=arg.test, timer=Timer.Timer(), )
def unpack_payload(payload):
    """Wrap a Monitor-format payload dict in a Bunch.

    Raises MonitorError if _payload_ is not a dict carrying a 'msg'
    key (i.e. not in Monitor format).
    """
    # Skip packets that are not in Monitor format.
    # ('in' instead of has_key: has_key was removed in Python 3.)
    if not isinstance(payload, dict) or ('msg' not in payload):
        raise MonitorError("PubSub value is not a Monitor payload: %s" % (
            str(payload)))

    return Bunch.Bunch(payload)
def add_radio(self, name, optionList, label):
    """Add a labelled group of radio buttons, one per (text, value)
    pair in _optionList_, registered under the lower-cased _name_."""
    lbl = gtk.Label(label)
    self.table.attach(lbl, self.col, self.col + 1,
                      self.row - 1, self.row,
                      xoptions=gtk.FILL, yoptions=gtk.FILL,
                      xpadding=1, ypadding=1)
    lbl.show()

    btn = None
    choices = []
    for (opt, val) in optionList:
        # Each button is created in the group of the previous one, so
        # the whole set is mutually exclusive.
        btn = gtk.RadioButton(group=btn, label=opt)
        self.table.attach(btn, self.col, self.col + 1,
                          self.row, self.row + 1,
                          xoptions=gtk.FILL, yoptions=gtk.FILL,
                          xpadding=1, ypadding=1)
        choices.append((btn, val))
        self.bump_col()
        btn.show()

    key = name.lower()
    self.params[key] = Bunch.Bunch(get_fn=self.get_radio, options=choices)
    self.addParam(key)
def _getframe(self, frameid, **kwdargs):
    """Return the cached record for _frameid_, creating it if absent.

    Any keyword arguments are merged into the record.  New records get
    blank values for all known headers, status 'A', and are appended to
    self.framelist (flagging a re-sort if they arrive out of order).
    """
    with self.lock:
        # ('in' instead of has_key: has_key was removed in Python 3.)
        if frameid in self.framecache:
            d = self.framecache[frameid]
            d.update(kwdargs)
            return d

        # Create a new entry
        dct = dict.fromkeys(headers, '')
        dct['frameid'] = frameid
        dct['status'] = 'A'
        d = Bunch.Bunch(dct)
        d.update(kwdargs)
        self.framecache[frameid] = d

        try:
            lastid = self.framelist[-1]
            if frameid < lastid:
                # Arrived out of order; flag the list for re-sorting.
                self.needsort = True
        except IndexError:
            # First frame
            pass

        self.framelist.append(d)
        return d
def add_input(self, name, width, defVal, label):
    """Add a labelled text-entry field pre-filled with _defVal_ and
    register it under the lower-cased _name_."""
    lbl = gtk.Label(label)
    lbl.show()
    self.table.attach(lbl, self.col, self.col + 1,
                      self.row - 1, self.row,
                      xoptions=gtk.FILL, yoptions=gtk.FILL,
                      xpadding=1, ypadding=1)

    entry = gtk.Entry()
    entry.set_width_chars(width)
    entry.set_text(str(defVal))
    entry.show()
    self.table.attach(entry, self.col, self.col + 1,
                      self.row, self.row + 1,
                      xoptions=gtk.FILL, yoptions=gtk.FILL,
                      xpadding=1, ypadding=1)
    self.bump_col()

    key = name.lower()
    self.params[key] = Bunch.Bunch(widget=entry, get_fn=self.get_entry)
    self.addParam(key)
def make_control(argv): 'return a Bunch' print argv parser = argparse.ArgumentParser() parser.add_argument('invocation') parser.add_argument('model', choices=('en', 'gb', 'rf')) parser.add_argument('processes') parser.add_argument('--test', action='store_true') parser.add_argument('--trace', action='store_true') arg = parser.parse_args(argv) arg.me = arg.invocation.split('.')[0] if arg.trace: pdb.set_trace() try: arg.processes_int = int(arg.processes) except: print 'processes is not an int; was: %s' % arg.processes raise ValueError dir_working = Path.Path().dir_working() path_out_dir = (os.path.join(dir_working, arg.me + '-test') if arg.test else os.path.join(dir_working, arg.me)) dirutility.assure_exists(path_out_dir) return Bunch.Bunch( arg=arg, path_out_log=os.path.join(path_out_dir, '0log.txt'), timer=Timer.Timer(), )