def register_monitor(self, facilities):
    # we have to import 'logging' not at the top of the module
    import logging

    self.setup_listener_gettext()

    t = current_thread()
    t.monitor_facilities = ltrace_str_to_int(facilities)
    t.monitor_uuid = uuid.uuid4()

    logging.notice(_(u'New trace session started with UUID {0}, '
        u'facilities {1}.').format(stylize(ST_UGID, t.monitor_uuid),
            stylize(ST_COMMENT, facilities)))

    # The monitor_lock avoids collisions on listener.verbose
    # modifications while a flood of messages is being sent on the
    # wire. Having a per-thread lock avoids locking the master
    # `options.monitor_lock` from the client side when only one
    # monitor changes its verbose level. This is more fine-grained.
    t.monitor_lock = RLock()

    with options.monitor_lock:
        options.monitor_listeners.append(t)

    # Return the UUID of the thread, so that the remote side
    # can detach easily when it terminates.
    return t.monitor_uuid
def find_server_Linux(favorite, group):
    """ Return the hostname (or IP, in some conditions) of our Licorn® server.

        These are tried in turn:

        - the ``LICORN_SERVER`` environment variable (always takes precedence,
          with a message); this variable should be used for debug / development
          purposes only;
        - a `zeroconf` lookup; this is the "standard" way of discovering our
          server;
        - the LAN DHCP server, on distros where this is supported (currently
          Ubuntu and Debian).

        :param favorite: UUID of our optional favorite server, as a string
            formatted like `LMC.configuration.system_uuid` (no dashes).
        :param group: an LXC cpuset. This is an undocumented feature. Please
            do not use it outside of debugging / testing environments.
    """
    env_server = os.getenv('LICORN_SERVER', None)

    if env_server:
        logging.notice(_(u'Using environment variable value {0} for our '
            u'server; unset {1} if you prefer automatic detection.').format(
                stylize(ST_IMPORTANT, env_server),
                stylize(ST_NAME, 'LICORN_SERVER')))

        if ':' in env_server:
            return env_server.split(':')

        return env_server, None

    return find_zeroconf_server_Linux(favorite, group)
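# A minimal usage sketch for find_server_Linux() above, assuming it is
# importable from the surrounding module. The address and port values are
# hypothetical; it shows the debug override via LICORN_SERVER and the
# zeroconf fallback when the variable is unset.
import os

os.environ['LICORN_SERVER'] = '192.168.1.10:3356'            # debug override
server, port = find_server_Linux(favorite=None, group=None)  # -> ('192.168.1.10', '3356')

del os.environ['LICORN_SERVER']                              # back to automatic detection
server, port = find_server_Linux(favorite=None, group=None)  # zeroconf lookup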
def write_file(self, local_path, remote_path):
    filename = os.path.basename(local_path)
    try:
        logging.debug("Changing remote directory to %s", remote_path)
        self.client.cwd(remote_path)
    except ftplib.error_perm:
        if not self.dry_run:
            logging.notice("Creating remote directory %s" % remote_path)
            self.client.mkd(remote_path)
        else:
            logging.warn("Remote path %s does not exist and will be created on write"
                         % remote_path)
    # Just test that we can perform the necessary operations
    # without writing anything
    logging.info("%s writing %s to %s" % (
        "Not" if self.dry_run else "", local_path, remote_path))
    if self.dry_run:
        return
    with open(local_path, "rb") as f:
        self.client.storbinary("STOR %s" % filename, f)
def _run(self, ns):
    config = Configuration(ns.configdir)
    config.use_badlog = ns.known_bad
    if not is_sorted(ns.actions):
        logging.warning("Actions will be performed in the order of the respective command-line arguments")
        logging.notice("Action order as requested: "
                       + ', '.join(a.name for a in ns.actions))
        logging.notice("Suggested natural order of actions: "
                       + ', '.join(a.name for a in sorted(set(ns.actions))))
    for (action, constants) in itertools.groupby(sorted(ns.actions)):
        count = sum(1 for c in constants)
        if count != 1:
            logging.warning("Redundant action {:s} will be performed {:d} times".format(action.name, count))
    with Manager(ns.bindir, ns.datadir, timeout=ns.timeout, config=config) as mngr:
        with _make_badlog(ns, logfile=mngr.badlog) as badlog:
            if ns.clean == 0:
                if not ns.actions:
                    logging.warning("Nothing will be done")
                for action in ns.actions:
                    _perform_action(mngr, action, badlog)
            elif ns.clean == 1:
                if not ns.actions:
                    logging.warning("Nothing will be cleaned")
                for action in ns.actions:
                    _perform_cleaning(mngr, action)
            elif ns.clean == 2:
                if ns.actions:
                    raise FatalError("No action must be specified in combination with {!r}".format('-cc'))
                if ns.datadir == '.':
                    raise FatalError("Cowardly refusing to recursively delete the current working directory")
                mngr.clean_all()
            else:
                raise AssertionError(ns.clean)
def _gen_generic(mngr: Manager, gen: Generators, bl: _BucketList):
    assert not gen.imported
    while bl:
        size = bl.pick()
        with mngr.make_tempdir() as tempdir:
            try:
                logging.info("Generating {:s} graph with {:s} ...".format(size.name, gen.name))
                meta = _call_generic_tool(mngr, gen, size, tempdir=tempdir)
            except RecoverableError as e:
                logging.error("Cannot generate graph: {!s}".format(e))
                continue
            actualsize = GraphSizes.classify(meta['nodes'])
            if actualsize != size:
                if bl.offer(actualsize):
                    logging.notice(
                        "Asked {:s} for a {:s} graph but got a {:s} one which is still useful though".format(
                            gen.name, size.name, actualsize.name))
                else:
                    logging.warning(
                        "Asked {:s} for a {:s} graph but got a {:s} one which I'll have to discard".format(
                            gen.name, size.name, actualsize.name))
                    continue
            if _insert_graph(mngr, gen, meta):
                bl.decrement(actualsize)
                if meta.get('layout'):
                    _insert_layout(mngr, meta, native=True)
def parse_args2(default_log_filename=None, parser=None):
    import argparse
    if not parser:
        parser = argparse.ArgumentParser()
    parser.add_argument('-v', '--verbose', action='count', help="verbose", default=0)
    parser.add_argument('-o', '--logfile', default=default_log_filename)
    try:
        import argcomplete
        argcomplete.autocomplete(parser)
    except ImportError:
        # argcomplete is optional; tab completion is simply disabled without it.
        pass
    args = parser.parse_args()
    loggers = [logging.root]
    if args.verbose == 0:
        for logger in loggers:
            init_logging(logger, logging.NOTICE, True, args.logfile)
    elif args.verbose == 1:
        for logger in loggers:
            init_logging(logger, logging.INFO, True, args.logfile)
    elif args.verbose > 1:
        for logger in loggers:
            init_logging(logger, logging.DEBUG, True, args.logfile)
    logging.notice("start running: " + ' '.join(sys.argv))
    logging.info(args)
    return args
def run(board, path, name):
    path = os.path.expandvars(path)
    path = os.path.expanduser(path)
    if not os.path.exists(path):
        raise RuntimeError('File ' + path + ' not found')
    vs = {}
    execfile(path, vs)
    if name not in vs:
        raise RuntimeError('Menu ' + name + ' not found in ' + path)
    menu = vs[name]
    logging.notice('Updating event sizes')
    evszs = board.computeEventSizes(menu)
    for i, s in evszs.iteritems():
        logging.info('mode %d: %d', i, s)
        menu.mode(i).eventSize = s
    for l in str(menu).split('\n'):
        logging.debug('%s', l)
    rc = board.getReadout().getNode('readout_control')
    rc.configureMenu(menu)
def testTTS(self):
    self.resetEverything()
    self.configMP7DAQ()
    self.configAMC13()
    # AMC13 must go in Run Mode
    self._amc13.startRun()
    self.linkStatus('TTS check - initial')
    time.sleep(1)
    ttsStates = [
        ('Warning', 0x1),
        ('OOS', 0x2),
        ('Busy', 0x4),
        ('Ready', 0x8),
        ('Error', 0xc),
        ('Disconnected', 0xf),
    ]
    for state, code in ttsStates:
        logging.notice('TTS check: Force %s', state)
        mp7daq.hw().getNode('readout.tts_csr.ctrl.tts_force').write(1)
        mp7daq.hw().getNode('readout.tts_csr.ctrl.tts').write(code)
        mp7daq.hw().dispatch()
        time.sleep(2)
        self.linkStatus('TTS check')
def evapcheck(self, op, thresh=0.20):
    'Update amount of evaporation and check for issues'
    if self.plate.name == "Samples":
        # Assume no evaporation while in PTC
        dt = clock.pipetting - self.lastevapupdate
        if dt < -0.1:
            # This may happen during thermocycler operation, since pipetting
            # while thermocycling is moved to pipthermotime after waitpgm()
            # is called.
            logging.notice(
                "%s: clock went backwards: pipetting=%f, lastevapupdate=%f, dt=%f -- probably OK due to counting pipetting time during PTC operation"
                % (self.name, clock.pipetting, self.lastevapupdate, dt))
    else:
        dt = clock.elapsed() - self.lastevapupdate
        if dt < -0.1:
            logging.error(
                "%s: clock went backwards: elapsed=%f, lastevapupdate=%f, dt=%f"
                % (self.name, clock.elapsed(), self.lastevapupdate, dt))
    if dt <= 0.1:
        return
    # Break it into smaller steps since volume affects rate
    for i in range(10):
        evaprate = self.plate.getevaprate(max(0, self.volume - self.evap))
        self.evap += evaprate * dt / 3600 / 10
    if op == 'aspirate' and self.evap > thresh * self.volume and self.evap > 2.0 and self.volume > 0:
        pctevap = self.evap / self.volume * 100
        logging.warning(
            " %s (%s.%s, vol=%.1f ul) may have %.1f ul of evaporation (%.0f%%)"
            % (self.name, str(self.plate), self.plate.wellname(self.well),
               self.volume, self.evap, pctevap))
    if "evap" in __historyOptions:
        self.history = self.history + (' [Evap: %0.1f ul]' % (self.evap))
    self.lastevapupdate += dt
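# Worked example of the integration step in evapcheck() above, with assumed
# numbers: given a constant evaporation rate of 0.6 ul/h and dt = 1800 s,
# each of the 10 sub-steps adds 0.6 * 1800 / 3600 / 10 = 0.03 ul, i.e.
# 0.3 ul in total. Recomputing the rate at every sub-step lets it track the
# shrinking effective volume (self.volume - self.evap).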
def _insert_layout(mngr: Manager, meta: dict, native: bool = False):
    with mngr.sql_ctx as curs:
        layout = Layouts.NATIVE if native else None
        graphid = Id(meta['graph'])
        fingerprint = prepare_fingerprint(meta.get('layout'))
        if not native and mngr.sql_select_curs(curs, 'Layouts', graph=graphid,
                                               layout=layout, fingerprint=fingerprint):
            logging.notice(
                "Discarding unknown layout of graph {gid!s} with fingerprint {fp!s} (already exists)"
                .format(gid=graphid, fp=Id(fingerprint)))
            return False
        layoutid = mngr.make_unique_layout_id(curs)
        filename = mngr.make_layout_filename(graphid, layoutid, layout=layout)
        mngr.sql_insert_curs(
            curs, 'Layouts', id=layoutid, graph=graphid, layout=layout,
            file=filename, fingerprint=fingerprint)
        os.makedirs(os.path.dirname(filename), exist_ok=True)
        if native:
            graphfilename = get_one(r['file'] for r in mngr.sql_select_curs(curs, 'Graphs', id=graphid))
            directory = os.path.dirname(filename)
            symlinktarget = os.path.relpath(graphfilename, start=directory)
            logging.info("Creating symbolic link: {!r} -> {!r}".format(filename, symlinktarget))
            os.symlink(symlinktarget, filename)
        else:
            logging.debug("Renaming layout file {!r} to {!r}".format(meta['filename-layout'], filename))
            os.rename(meta['filename-layout'], filename)
        return True
def pip_install_packages(packages_list, warn_only=False):
    """ Install one or more packages, via PIP. The function just spawns
        :program:`pip` with argument ``install``.

        :param packages_list: a list of strings representing PIP package
            names. For real-life examples, see the `upgrades` modules
            (notably `daemon`).
        :param warn_only: see :func:`install_packages`.

        .. versionadded:: 1.3
            Before the WMI2, the package management foundations didn't exist.
    """
    tname = stylize(ST_NAME, current_thread().name)
    pnames = u', '.join(stylize(ST_NAME, x) for x in packages_list)

    logging.notice(_(u'{0}: Installing needed packages {1} from source before '
        u'continuing. Please wait…').format(tname, pnames))

    out, err = process.execute(['pip', 'install'] + packages_list)

    if err:
        logging.warn_or_raise(_(u'An error occurred while installing package(s) '
            u'{0}! PIP install log follows:').format(pnames) + u'\n' + err,
            warn_only=warn_only)
    else:
        logging.notice(_(u'{0}: Successfully installed package(s) {1} via {2}.').format(
            tname, pnames, stylize(ST_NAME, 'PIP')))
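# Hedged usage sketch for pip_install_packages() above: install two packages,
# warning instead of raising if pip fails. The package names are examples
# only, not requirements of the surrounding code.
pip_install_packages(['pyudev', 'netifaces'], warn_only=True)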
def unregister_monitor(self, muuid):
    # we have to import 'logging' not at the top of the module
    import logging

    self.setup_listener_gettext()

    found = None

    with options.monitor_lock:
        for t in options.monitor_listeners[:]:
            if t.monitor_uuid == muuid:
                found = t
                options.monitor_listeners.remove(t)
                break

    if found:
        del found.monitor_facilities
        del found.monitor_uuid
        del found.monitor_lock
    else:
        logging.warning(_(u'Monitor listener with UUID %s not found!') % muuid)

    logging.notice(_(u'Trace session UUID {0} ended.').format(
        stylize(ST_UGID, muuid)))
def logwrite(logcontents, loglevel="error"):
    """ Get the log message and store it in the log file """
    logpath = '/var/log/k8s-manager/'
    global hostname
    hostname = os.uname()[1]
    if not os.path.isdir(logpath):
        os.makedirs(logpath)
    logdata = datetime.datetime.now()
    logtime = logdata.strftime('%Y-%m-%d %H:%M:%S')
    daylogfile = logpath + '/' + 'k8s-manager' + '.log'
    logging.basicConfig(filename=daylogfile, level=logging.DEBUG)
    # Build the message once instead of repeating it in every branch.
    message = (" " + str(logtime) + " " + hostname + " "
               + os.path.basename(sys.argv[0]).split(".")[0]
               + ": " + str(logcontents))
    if str(loglevel) == "info":
        logging.info(message)
    elif str(loglevel) == "warning":
        logging.warning(message)
    elif str(loglevel) == "notice":
        # The standard 'logging' module has no notice(); record
        # NOTICE-level messages as INFO instead.
        logging.info(message)
    else:
        logging.error(message)
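# Usage sketch for logwrite() above (assumes write access to
# /var/log/k8s-manager/); the level falls back to "error" when omitted.
logwrite("node registration complete", loglevel="info")
logwrite("apiserver unreachable")   # logged at error level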
def _compute_prop_outer(mngr: Manager, prop: Properties, kern: Kernels, badlog: BadLog,
                        graphid: Id = None, layoutid: Id = None, layoutfile: str = None,
                        progress: float = None):
    what = {Kernels.BOXED: "discrete", Kernels.GAUSSIAN: "continuous"}.get(kern, "other")
    if badlog.get_bad(Actions.PROPERTIES, layoutid, prop):
        logging.notice("Skipping computation of {:s} property {:s} for layout {!s}".format(
            what, prop.name, layoutid))
        return
    prefix = "[{:6.2f} %] ".format(100.0 * min(1.0, progress)) if progress is not None else ""
    logging.info(prefix + "Computing {:s} property {:s} for layout {!s} ...".format(
        what, prop.name, layoutid))
    directory = _get_directory(mngr, layoutid, prop, kern, make=True)
    try:
        _compute_prop_inner(mngr, prop, kern, layoutid, layoutfile, directory=directory)
    except RecoverableError as e:
        badlog.set_bad(Actions.PROPERTIES, layoutid, prop, msg=str(e))
        logging.error("Cannot compute {:s} property {:s} for layout {!s} of graph {!s}: {!s}"
                      .format(what, prop.name, layoutid, graphid, e))
def fill_empty_var(var):
    ''' sets variable to e (estimate in phyml) if empty '''
    if var is None:
        logging.notice('unable to parse %s; PhyML will estimate this in subsequent runs' % var)
        return 'e'
    else:
        return var
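# Small demonstration of fill_empty_var() above: values that could not be
# parsed from the PhyML stats file come through as None and are replaced by
# 'e' so PhyML estimates them itself; parsed values pass through unchanged.
alpha = fill_empty_var(None)    # -> 'e' (estimate)
tstv = fill_empty_var('4.23')   # -> '4.23' (kept as parsed)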
def run(board, enablechans=None, tdr_fmt=None, dmx_hdrfmt=None, dmx_valfmt=None, s1_bc0fmt=None):
    '''
    Configures the MP7 formatter.

    Arguments:
      * board - The MP7Controller FunctorInterface for that board
      * tdr_fmt - TDR header formatter options. Valid values: None, or a map
        with keys 'strip' and 'insert', each value True or False
      * dmx_hdrfmt - Demux header formatter options. Valid values: same as tdr_fmt
      * dmx_valfmt - Demux datavalid-override formatter options. Valid values:
        None, 'disable', or a map with keys 'start' and 'stop', each value a
        2-tuple of ints (i.e. (bx, clock_cycle))
      * s1_bc0fmt - Stage-1 formatter options. Valid values: None, 'disable',
        or an int (the bx number)
    '''
    logging.notice("Configuring formatters")
    cm = hlp.channelMgr(board, enablechans)
    fmt = board.getFormatter()
    ctrl = board.getCtrl()
    datapath = board.getDatapath()

    if not any([tdr_fmt, dmx_hdrfmt, dmx_valfmt, s1_bc0fmt]):
        logging.warn('Nothing to do')
        return

    if tdr_fmt is not None:
        cm.configureHdrFormatters(mp7.FormatterKind.kTDRFormatter,
                                  tdr_fmt['strip'], tdr_fmt['insert'])
    if dmx_hdrfmt is not None:
        cm.configureHdrFormatters(mp7.FormatterKind.kDemuxFormatter,
                                  dmx_hdrfmt['strip'], dmx_hdrfmt['insert'])
    if dmx_valfmt is not None:
        # print mp7.orbit.Point(*dmx_valfmt['start']), mp7.orbit.Point(*dmx_valfmt['stop'])
        if dmx_valfmt == 'disable':
            cm.disableDVFormatters()
        else:
            cm.configureDVFormatters(mp7.orbit.Point(*dmx_valfmt['start']),
                                     mp7.orbit.Point(*dmx_valfmt['stop']))
def clean_model(self):
    assert os.path.realpath(os.getcwd()) == os.path.realpath(self.abs_datadir)
    logging.notice("Deleting existing model ...")
    with self.sql_ctx as curs:
        curs.execute("DROP TABLE `TestScores`")
    if os.path.isdir(self.nndir):
        shutil.rmtree(self.nndir)
def clean_layouts(self):
    assert os.path.realpath(os.getcwd()) == os.path.realpath(self.abs_datadir)
    logging.notice("Deleting all existing layouts ...")
    with self.sql_ctx as curs:
        curs.execute("DROP TABLE `Layouts`")
    if os.path.isdir(self.layoutdir):
        shutil.rmtree(self.layoutdir)
def print_errors(self):
    items = self.get_errors()
    if not items:
        logging.notice("No errors")
    else:
        logging.error(f"{len(items)} Errors:")
        for w in items:
            logging.error(f"\t{w.msg}")
def ps(self):
    logging.notice("ps %s" % self)
    for mongod in self.mongod_arr:
        Mongod(mongod).ps()
    try:
        self._get_primary()
    except Exception:
        # A primary may not be elected yet; the ps output is still useful.
        pass
def print_warnings(self):
    items = self.get_warnings()
    if not items:
        logging.notice("No warnings")
    else:
        logging.warning(f"{len(items)} Warnings:")
        for w in items:
            logging.warning(f"\t{w.msg}")
def shake(self, plate, dur=60, speed=None, accel=5, returnPlate=True, samps=None, force=False):
    if self.ptcrunning and plate == decklayout.SAMPLEPLATE:
        self.waitpgm()
    # Move the plate to the shaker, run for the given time, and bring plate back
    allsamps = Sample.getAllOnPlate(plate)
    if samps is None:
        samps = allsamps
    if all([x.isMixed() for x in samps]) and not force:
        logging.notice("No need to shake " + plate.name + ", but doing so anyway.")
    minspeed = 0
    maxspeed = 2000
    for x in samps:
        (a, b) = x.getmixspeeds()
        minspeed = max([a, minspeed])
        maxspeed = min([b, maxspeed])
    if speed is None:
        if minspeed < maxspeed:
            # Mix as fast as safely possible (but always above minspeed)
            speed = max((maxspeed + minspeed) / 2, maxspeed - 50)
        else:
            speed = maxspeed
    if speed < minspeed - 2 or speed > maxspeed + 2:
        others = ""
        for x in allsamps:
            (a, b) = x.getmixspeeds()
            if b < minspeed or a > maxspeed:
                if a is not None and a > 0:
                    others += " {%s: %.1ful,G=%.2f%%,min=%.0f,max=%.0f}" % (
                        x.name, x.volume, x.glycerolfrac() * 100, a, b)
                else:
                    others += " {%s: %.1ful,G=%.2f%%,max=%.0f}" % (
                        x.name, x.volume, x.glycerolfrac() * 100, b)
        logging.mixwarning("Mixing %s at %.0f RPM; minspeed(%.0f) > maxspeed(%.0f), limits=[%s]" % (
            plate.name, speed, minspeed, maxspeed, others))
    else:
        logging.notice("Mixing %s at %.0f RPM (min RPM=%.0f, max RPM=%.0f)" % (
            plate.name, speed, minspeed, maxspeed))
    oldloc = plate.curloc
    self.moveplate(plate, "Shaker", returnHome=False)
    global __shakerActive
    __shakerActive = True
    worklist.pyrun("BioShake\\bioexec.py setElmLockPos")
    worklist.pyrun("BioShake\\bioexec.py setShakeTargetSpeed%d" % speed)
    worklist.pyrun("BioShake\\bioexec.py setShakeAcceleration%d" % accel)
    worklist.pyrun("BioShake\\bioexec.py shakeOn")
    self.starttimer()
    Sample.shaken(plate.name, speed)
    Sample.addallhistory("(S%d@%.0f)" % (dur, speed), onlyplate=plate.name)
    self.waittimer(dur)
    worklist.pyrun("BioShake\\bioexec.py shakeOff")
    self.starttimer()
    self.waittimer(accel + 4)
    worklist.pyrun("BioShake\\bioexec.py setElmUnlockPos")
    __shakerActive = False
    if returnPlate:
        self.moveplate(plate, oldloc)
def run(board):
    # if not command.startswith('board'):
    #     throw ArgumentError('Alakazham!')
    try:
        import IPython
    except ImportError:
        logging.notice('Failed to load IPython')
        return
    logging.notice('Starting IPython - %s connected to variable \'board\'', board.id())
    IPython.embed()
def stage(self, stagename, reagents, sources, samples, volume, finalx=1.0, destMix=True, dilutant=None):
    # Add water to sample wells as needed (multi)
    # Pipette reagents into sample wells (multi)
    # Pipette sources into sample wells
    # Concs are in x (>=1)
    # Sample.printallsamples("Before "+stagename)
    # print "\nStage: ", stagename, "reagents=", [str(r) for r in reagents], ",sources=", [str(s) for s in sources], ",samples=", [str(s) for s in samples], str(volume)

    if len(samples) == 0:
        logging.notice("No samples")
        return
    if dilutant is None:
        dilutant = decklayout.WATER
    worklist.comment("Stage: " + stagename)
    if not isinstance(volume, list):
        volume = [volume for i in range(len(samples))]
    for i in range(len(volume)):
        assert volume[i] > 0
        volume[i] = float(volume[i])

    reagentvols = [1.0 / x.conc.dilutionneeded() * finalx for x in reagents]
    if len(sources) > 0:
        sourcevols = [volume[i] * 1.0 / sources[i].conc.dilutionneeded() * finalx
                      for i in range(len(sources))]
        while len(sourcevols) < len(samples):
            sourcevols.append(0)
        watervols = [volume[i] * (1 - sum(reagentvols)) - samples[i].volume - sourcevols[i]
                     for i in range(len(samples))]
    else:
        watervols = [volume[i] * (1 - sum(reagentvols)) - samples[i].volume
                     for i in range(len(samples))]

    if min(watervols) < -0.01:
        msg = "Error: Ingredients add up to more than desired volume by %.1f ul" % (-min(watervols))
        for s in samples:
            if s.volume > 0:
                msg = msg + " Note: %s already contains %.1f ul\n" % (s.name, s.volume)
        logging.error(msg)

    watervols2 = [w if w <= 2 else 0 for w in watervols]  # Move later since volume is low
    watervols = [w if w > 2 else 0 for w in watervols]    # Only move >=2 at beginning
    if any([w > 0.01 for w in watervols]):
        self.multitransfer(watervols, dilutant, samples,
                           (False, destMix and (len(reagents) + len(sources) == 0)))
    for i in range(len(reagents)):
        self.multitransfer([reagentvols[i] * v for v in volume], reagents[i], samples,
                           (True, destMix and (len(sources) == 0 and i == len(reagents) - 1)))
    if any([w > 0.01 for w in watervols2]):
        self.multitransfer(watervols2, dilutant, samples,
                           (False, destMix and (len(reagents) + len(sources) == 0)))
    if len(sources) > 0:
        assert len(sources) <= len(samples)
        for i in range(len(sources)):
            self.transfer(sourcevols[i], sources[i], samples[i], (True, destMix))
def run(board, enablechans=None, alignTo=None, alignMargin=3, alignFreeze=False, dmx_delays=False):
    cm = hlp.channelMgr(board, enablechans)
    logging.notice('Aligning links')
    delays = None
    if dmx_delays:
        # delays = {}
        # nMaxMps = 12
        # clockRatio = 6
        # nMpOutputs = 6
        # for mp in xrange(nMaxMps):
        #     for c in xrange(nMpOutputs):
        #         delays[c*nMaxMps+mp] = mp*clockRatio
        delays = {}
        nMaxMps = 6
        nMaxMps_2 = 12
        clockRatio = 6
        nMpOutputs = 6
        nMpOutputs_2 = 12
        for mp in xrange(nMaxMps - 1):
            for c in xrange(nMpOutputs):
                delays[c * nMaxMps + mp] = mp * clockRatio
        for mp in xrange(nMaxMps, nMaxMps_2):
            for c in xrange(nMpOutputs, nMpOutputs_2):
                delays[c * nMaxMps + mp - 6] = mp * clockRatio - 6
    if alignTo:
        p = mp7.orbit.Point(alignTo[0], alignTo[1])
        args = (p,) if delays is None else (p, delays)
        cm.align(*args)
    else:
        args = (alignMargin,) if delays is None else (delays, alignMargin)
        cm.minimizeAndAlign(*args)
    if alignFreeze:
        cm.freezeAlignment()
def console_start(self, is_tty=True):
    # we have to import 'logging' not at the top of the module
    import logging, events
    from _settings import settings

    # other foundations
    from workers import workers

    # import things that we need in the remote console
    from licorn.core import version, LMC
    from threads import RLock, Event

    # This is a little too much
    #self._console_namespace = sys._getframe(1).f_globals

    self._console_isatty = is_tty
    self._console_namespace = {
        'version':     version,
        'settings':    settings,
        'options':     options,
        'exceptions':  exceptions,
        'workers':     workers,
        'events':      events,
        'LicornEvent': events.LicornEvent,
        'daemon':      self.licornd,
        'threads':     self.licornd.threads,
        'uptime':      self.licornd.uptime,
        'LMC':         LMC,
        'dump':        ltrace_dump,
        'fulldump':    ltrace_fulldump,
        'dumpstacks':  ltrace_dumpstacks,
        'RLock':       RLock,
        'Event':       Event,
    }
    self._console_interpreter = self.__class__.BufferedInterpreter(
        self._console_namespace)
    self._console_completer = rlcompleter.Completer(self._console_namespace)

    t = current_thread()
    logging.notice(_(u'{0}: Interactive console requested by {1} '
        u'from {2}.').format(self.licornd,
            stylize(ST_NAME, t._licorn_remote_user),
            stylize(ST_ADDRESS, '%s:%s' % (t._licorn_remote_address,
                t._licorn_remote_port))),
        to_listener=False)

    if is_tty:
        remote_output(_(u'Welcome into licornd\'s arcanes…') + '\n',
            clear_terminal=True, char_delay=0.025)
    else:
        remote_output(_(u'>>> Entered batched remote console.') + '\n',
            _message_channel_=2)
def _get_same_graph_id_curs(mngr, curs, lhslid, rhslid, exception=FatalError):
    lhsgid = _get_graph_id_curs(mngr, curs, lhslid, exception=exception)
    rhsgid = _get_graph_id_curs(mngr, curs, rhslid, exception=exception)
    if lhsgid != rhsgid:
        logging.notice("Layout {!s} corresponds to graph {!s}".format(lhslid, lhsgid))
        logging.notice("Layout {!s} corresponds to graph {!s}".format(rhslid, rhsgid))
        raise exception("The layouts {!s} and {!s} correspond to different graphs".format(
            lhslid, rhslid))
    return lhsgid
def transfer(self, volume, src, dest, mix=(True, False), getDITI=True, dropDITI=True):
    if self.ptcrunning and (src.plate == decklayout.SAMPLEPLATE or dest.plate == decklayout.SAMPLEPLATE):
        self.waitpgm()
    if volume > self.MAXVOLUME:
        destvol = dest.volume
        reuseTip = destvol <= 0
        msg = "Splitting large transfer of %.1f ul into smaller chunks < %.1f ul " % (volume, self.MAXVOLUME)
        if reuseTip:
            msg += "with tip reuse"
        else:
            msg += "without tip reuse"
        logging.notice(msg)
        self.transfer(self.MAXVOLUME, src, dest, mix, getDITI, False)
        self.transfer(volume - self.MAXVOLUME, src, dest,
                      (mix[0] and not reuseTip, mix[1]), False, dropDITI)
        return

    cmt = "Add %.1f ul of %s to %s" % (volume, src.name, dest.name)
    ditivolume = volume + src.inliquidLC.singletag
    if mix[0] and not src.isMixed():
        cmt = cmt + " with src mix"
        ditivolume = max(ditivolume, src.volume)
    if mix[1] and dest.volume > 0 and not src.isMixed():
        cmt = cmt + " with dest mix"
        ditivolume = max(ditivolume, volume + dest.volume)
        # print "Mix volume=%.1f ul" % (ditivolume)
    if mix[0] and not src.isMixed() and (src.plate == decklayout.SAMPLEPLATE or src.plate == decklayout.DILPLATE):
        worklist.comment("shaking for src mix of " + src.name)
        # Need to do this before allocating a tip since washing during this
        # will modify the tip clean states
        self.shakeSamples([src])
    if self.useDiTis:
        tipMask = 4
        if getDITI:
            worklist.getDITI(tipMask & self.DITIMASK, ditivolume)
    else:
        tipMask = self.cleantip()
    # print "*", cmt
    worklist.comment(cmt)

    if mix[0] and (not src.isMixed() or not src.wellMixed):
        if src.plate == decklayout.SAMPLEPLATE or src.plate == decklayout.DILPLATE:
            logging.notice("Forcing pipette mix of " + src.name)
            worklist.comment("pipette mix for src mix of " + src.name)
            src.mix(tipMask)  # Manual mix (after allocating a tip for this)
    src.aspirate(tipMask, volume)
    dest.dispense(tipMask, volume, src)
    if mix[1]:
        dest.mix(tipMask, True)
    if self.useDiTis and dropDITI:
        worklist.dropDITI(tipMask & self.DITIMASK, decklayout.WASTE)
def clean_properties(self):
    assert os.path.realpath(os.getcwd()) == os.path.realpath(self.abs_datadir)
    logging.notice("Deleting all existing properties ...")
    tables = ['PropertiesDisc', 'PropertiesCont', 'Histograms',
              'SlidingAverages', 'MajorAxes', 'MinorAxes']
    with self.sql_ctx as curs:
        for table in tables:
            curs.execute("DROP TABLE `{table:s}`".format(table=table))
    if os.path.isdir(self.propsdir):
        shutil.rmtree(self.propsdir)
def handle_error(self, *args, **kwargs):
    try:
        raise  # re-raise the exception currently being handled
    except BrokenPipeError:
        logging.notice("Could not send complete reply; client closed connection prematurely")
    except Exception as e:
        logging.critical("Uncaught exception of type {:s} in main loop of HTTP server".format(
            type(e).__name__))
        dump_current_exception_trace()
        raise SystemExit(True)
def _get_log_level_environment():
    envval = os.environ.get(LOG_LEVEL_ENVVAR)
    if envval is None:
        return LOG_LEVEL_DEFAULT
    try:
        return LogLevels.parse(envval)
    except ValueError:
        logging.warning("Ignoring bogus value of environment variable {:s}: {!r}".format(
            LOG_LEVEL_ENVVAR, envval))
        logging.notice("Valid logging levels are: " + ', '.join(l.name for l in LogLevels))
        return LOG_LEVEL_DEFAULT
def overrideDataValidPattern(bData, replaceChs, masterCh):
    '''
    Returns copy of board data with data valid pattern of several channels
    updated to match the data valid pattern of a master channel.

    Arguments:
      - bData      : Input board data object
      - replaceChs : List of IDs of channels whose data valid bits will be updated
      - masterCh   : ID of channel whose data valid pattern will be used.
    '''
    assert isinstance(bData, mp7.BoardData)
    assert isinstance(replaceChs, list)
    assert isinstance(masterCh, int)

    indata = bData
    logging.notice('Master is %d', masterCh)
    master = indata[masterCh]
    valids = [(f.data >> 32) for f in master]

    fakedata = mp7.LinkData()
    for m in master:
        f = mp7.Frame()
        f.valid = m.valid
        fakedata.append(f)

    # if args.offset == 0:
    #     pass
    # elif args.offset < 0:
    #     fakedata = fakedata[-args.offset:] + [0]*-args.offset
    # elif args.offset > 0:
    #     fakedata = [0]*args.offset + fakedata[:-args.offset]

    # print 'len', len(valids), len(fakedata)
    # for v, f in zip(valids, fakedata):
    #     print v, hex(f)
    # for i, v in enumerate(valids):
    #     print i, v

    outdata = mp7.BoardData(indata.name())
    for l in indata:
        if not l.first in replaceChs:
            outdata.add(l.first, l.second)
            continue
        outdata.add(l.first, fakedata)
    return outdata
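# Hedged usage sketch for overrideDataValidPattern() above, built only from
# the mp7 calls already seen in this snippet (BoardData, LinkData, Frame);
# the channel IDs and frame count are illustrative.
bd = mp7.BoardData('toy')
for ch in range(4):
    link = mp7.LinkData()
    for _ in range(6):
        f = mp7.Frame()
        f.valid = True
        link.append(f)
    bd.add(ch, link)
# Channels 1-3 inherit channel 0's data valid pattern.
out = overrideDataValidPattern(bd, replaceChs=[1, 2, 3], masterCh=0)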
def _log_data(data, description="Bogus data"):
    logging.notice(description + " was {:d} bytes long".format(len(data)))
    try:
        text = data if isinstance(data, str) else data.decode()
        for (i, line) in enumerate(map(str.rstrip, text.splitlines())):
            logging.notice("{:6d}: {:s}".format(i + 1, line))
    except UnicodeError:
        (dumpfd, dumpfilename) = tempfile.mkstemp(prefix='dump-', suffix='.dat')
        logging.error(description + " could not be decoded as Unicode data;"
                      + " dumping binary data to file {!r} ...".format(dumpfilename))
        with os.fdopen(dumpfd, 'wb') as ostr:
            ostr.write(data)
def _gen_import(mngr: Manager, gen: Generators, src: ImportSource, bl: _BucketList):
    assert gen.imported
    total = bl.total()
    with src as archive, mngr.make_tempdir() as tempdir:
        cmd = [
            os.path.join(mngr.abs_bindir, 'src', 'generators', 'import'),
            '--format={:s}'.format(src.format),
            '--output={:s}'.format(os.path.join(tempdir, enum_to_json(gen) + GRAPH_FILE_SUFFIX)),
            '--meta={:s}'.format('STDIO'),
        ]
        if src.layout is not None:
            cmd.append('--layout={:d}'.format(int(src.layout)))
        else:
            cmd.append('--output-layout={:s}'.format(os.path.join(tempdir, 'layout' + GRAPH_FILE_SUFFIX)))
        if src.simplify:
            cmd.append('--simplify')
        native = src.layout is True
        poisoned = src.layout is None
        # Let me open that file for you
        cmd.append('STDIO' if src.compression is None else 'STDIO:' + src.compression)
        archlen = len(archive)  # This is a potentially expensive operation so do it only once
        count = 0
        logging.info("{:s} archive {!r} contains {:d} graphs in total".format(
            gen.name, archive.name, archlen))
        for (i, cand) in enumerate(archive):
            if not bl:
                break
            progress = i / archlen if total is None else count / total
            pretty = archive.prettyname(cand)
            logging.info("[{:6.2f} %] Considering {:s} graph {:d} of {:d} from {!r} of {!r} ...".format(
                100.0 * progress, gen.name, i + 1, archlen, pretty, archive.name))
            try:
                with archive.get(cand) as istr:
                    meta = mngr.call_graphstudy_tool(cmd, meta='stdout', stdin=istr)
            except Exception as e:
                if not isinstance(e, RecoverableError) and not archive.is_likely_read_error(e):
                    raise
                logging.error("Cannot import {:s} graph {!r}: {!s}".format(gen.name, pretty, e))
                continue
            actualsize = GraphSizes.classify(meta['nodes'])
            if not bl.offer(actualsize):
                logging.notice("Discarding {:s} {:s} graph (not wanted)".format(
                    actualsize.name, gen.name))
                continue
            if _insert_graph(mngr, gen, meta, native=native, poisoned=poisoned):
                bl.decrement(actualsize)
                count += 1
                if meta.get('layout'):
                    _insert_layout(mngr, meta, native=native)
        logging.info("[{:6.2f} %] Imported {:d} {:s} graphs from {!r}".format(
            100.0, count, gen.name, archive.name))
    for (size, diff) in bl.items():
        logging.warning("{:s} archive exhausted but {:d} {:s} graphs are still missing".format(
            gen.name, diff, size.name))
def chooseLC(self, aspirateVolume=0):
    if self.volume - aspirateVolume >= MINLIQUIDDETECTVOLUME:
        if aspirateVolume == 0:
            return self.inliquidLC  # Not aspirating, should be fine
        # Try using liquid detection
        initheight = self.plate.getliquidheight(self.volume)  # True height at start
        finalheight = self.plate.getliquidheight(self.volume - aspirateVolume)  # True height at end of aspirate
        initgemvolume = self.plate.getgemliquidvolume(initheight)  # How much will Gemini think we have at start
        if initgemvolume < aspirateVolume + 15:
            # Not enough
            msg = "Aspirate %.1f ul from %.1f ul, gem will think initial volume is %.1ful which is too low to reliably work - not using LD" % (
                aspirateVolume, self.volume, initgemvolume)
            logging.notice(msg)
        else:
            finalgemvolume = initgemvolume - aspirateVolume
            finalgemheight = self.plate.getgemliquidheight(finalgemvolume)
            finaltipdepth = self.inliquidLC.submerge - (finalgemheight - finalheight)
            msg = "Aspirate %.1f ul from %.1f ul in %s: height goes from %.1f to %.1f mm, gem will think initial volume is %.1ful and final height %.1f mm" % (
                aspirateVolume, self.volume, self.name, initheight, finalheight,
                initgemvolume, finalgemheight)
            if finalgemheight - 0.1 < self.inliquidLC.submerge:
                # Gemini won't be able to submerge as much as requested
                logging.notice(msg + ": Gemini would think there's not enough liquid to submerge %.1f mm - not using LD"
                               % self.inliquidLC.submerge)
            elif finaltipdepth < 0.1:
                # Tracking is off so much that tip will break surface of water during operation
                logging.warning(msg + ": tip will not be submerged enough (depth=%.1f mm) - not using LD"
                                % finaltipdepth)
            else:
                # Should be good
                # logging.notice(msg)
                return self.inliquidLC
    # No liquid detect:
    if self.volume == 0 and aspirateVolume == 0:
        return self.emptyLC
    elif self.hasBeads and self.plate.curloc == "Magnet":
        return self.beadsLC
    else:
        return self.bottomLC
def get_localdata(dbpool, locale):
    # Fetch the most recent localedata row for this locale.
    with dbpool.scoped_get_group("common", readonly=True) as db:
        # Assuming the table and blob column are both named 'localedata':
        # the original query mixed the spellings 'localdata' and 'localedata'.
        rs = db.query(
            "select version, country, localedata from localedata "
            "where country=%s and mdate=(select MAX(mdate) from localedata where country=%s)",
            (locale, locale,))
        if not rs:
            raise DBAccessError("get_localedata not found error")
        for ld in rs:
            localedata_version, country, localedata = ld
            if country == locale:
                localedata_pb = localedata_pb2.LocaleData()
                localedata_pb.ParseFromString(localedata)
                LDManager.set_localedata_with_key(localedata_pb, (localedata_version, country))
                logging.notice("localedata load complete version: %s country: %s",
                               localedata_version, country)
        return (localedata_version, country)
def __dump_model(self):
    try:
        time_ = datetime.datetime.fromtimestamp(time.time())
        year = time_.year
        day = time_.day
        month = time_.month
        path = os.path.join(self.dump_path, str(year), str(month), str(day))
        if not os.path.exists(path):
            os.makedirs(path)
            self.logger('create {0} dir : '.format(path))
        # Join with the directory instead of concatenating onto it, so the
        # dump lands inside 'path' rather than alongside it.
        filename_model = os.path.join(path, str(time.time()) + ' model ' + self.method + '.sav')
        with open(filename_model, 'wb') as file:
            pickle.dump(self.result, file)
        logging.notice('Create dump {0} '.format(filename_model))
    except Exception as e:
        logging.notice('Dump processing errors : {0}'.format(repr(e)))
def get_PhyML_stats(value=None):
    ''' parses PhyML stats '''
    if os.path.isfile('init_phyml_stats.txt') and os.path.getsize('init_phyml_stats.txt') > 0:
        # Start each statistic as None so fill_empty_var() can catch anything
        # the stats file did not provide.
        alpha = TsTv = freqs = None
        for phymlstats in open('init_phyml_stats.txt', 'r'):
            if phymlstats.startswith('. Log-likelihood:'):
                loglike = phymlstats.strip().split()[-1]
                logging.info('calculated %s log-likelihood from PhyML statistics' % loglike)
            elif phymlstats.startswith(' - Gamma shape parameter:'):
                alpha = phymlstats.strip().split()[-1]
                logging.info('calculated %s gamma shape from PhyML statistics' % alpha)
            elif phymlstats.startswith(' - f(A)='):
                fA = phymlstats.strip().split()[-1]
            elif phymlstats.startswith(' - f(C)='):
                fC = phymlstats.strip().split()[-1]
            elif phymlstats.startswith(' - f(G)='):
                fG = phymlstats.strip().split()[-1]
            elif phymlstats.startswith(' - f(T)='):
                fT = phymlstats.strip().split()[-1]
                freqs = '%s,%s,%s,%s' % (fA, fC, fG, fT)  # Order (A,C,G,T) matters for PhyML
                logging.info('calculated %s frequencies of A,C,G,T from PhyML statistics' % freqs)
            elif phymlstats.startswith(' A <-> C'):
                A2C = phymlstats.strip().split()[-1]
            elif phymlstats.startswith(' A <-> G'):
                A2G = phymlstats.strip().split()[-1]
            elif phymlstats.startswith(' A <-> T'):
                A2T = phymlstats.strip().split()[-1]
            elif phymlstats.startswith(' C <-> G'):
                C2G = phymlstats.strip().split()[-1]
            elif phymlstats.startswith(' C <-> T'):
                C2T = phymlstats.strip().split()[-1]
            elif phymlstats.startswith(' G <-> T'):
                G2T = phymlstats.strip().split()[-1]
                Transitions = (Decimal(A2G) + Decimal(C2T))  # A<->G && C<->T
                logging.info('calculated %s transitions from PhyML statistics' % Transitions)
                Transversions = (Decimal(A2C) + Decimal(A2T) + Decimal(C2G) + Decimal(G2T))  # All (4) others
                logging.info('calculated %s transversions from PhyML statistics' % Transversions)
                TsTv = (Transitions / Transversions).quantize(Decimal('.000001'), rounding=ROUND_UP)
                logging.info('calculated %s Ts:Tv from PhyML statistics' % TsTv)
        # Replace any statistic we failed to parse with 'e' so PhyML
        # estimates it itself.
        alpha = fill_empty_var(alpha)
        freqs = fill_empty_var(freqs)
        TsTv = fill_empty_var(TsTv)
    else:
        logging.notice('unable to open phyml stats file to optimize parameters')
        logging.notice('gamma shape, Ts:Tv, and f(A),f(T),f(C),f(G) values will be estimated in subsequent runs')
        alpha = 'e'
        TsTv = 'e'
        freqs = 'm'
    return alpha, TsTv, freqs
def _quick_archive_import_eh(__cache=[]):
    global _QUICK_ARCHIVE_IMPORT
    if _QUICK_ARCHIVE_IMPORT is None:
        _QUICK_ARCHIVE_IMPORT = False
        envvar = 'MSC_QUICK_ARCHIVE_IMPORT'
        envval = os.getenv(envvar)
        if envval is not None:
            try:
                quick = int(envval)
            except ValueError:
                logging.warning("Ignoring bogus value of environment variable {!s}={!r}".format(
                    envvar, envval))
            else:
                _QUICK_ARCHIVE_IMPORT = (quick > 0)
                if _QUICK_ARCHIVE_IMPORT:
                    logging.notice("Archives won't be scanned if requested graph count is unbounded ({!s}={!r})"
                                   .format(envvar, quick))
    return _QUICK_ARCHIVE_IMPORT
def __dump_result(self):
    try:
        time_ = datetime.datetime.fromtimestamp(time.time())
        year = time_.year
        day = time_.day
        month = time_.month
        path = os.path.join(self.dump_path, str(year), str(month), str(day))
        if not os.path.exists(path):
            os.makedirs(path)
            self.logger('create {0} dir : '.format(path))
        # Join with the directory instead of concatenating onto it, so the
        # dump lands inside 'path' rather than alongside it.
        filename_data = os.path.join(path, str(time.time()) + ' centers ' + self.method)
        with open(filename_data, 'wb') as file:
            file.write('cluster_centers\n')
            file.write(self.get_centers())
        logging.notice('Create dump data {0} '.format(filename_data))
    except Exception as e:
        logging.notice('Dump processing errors : {0} '.format(repr(e)))
def _insert_graph(mngr: Manager, gen: Generators, meta: dict,
                  native: bool = False, poisoned: bool = False):
    assert not (native and poisoned)
    graphid = Id(meta['graph'])
    filename = mngr.make_graph_filename(graphid, generator=gen)
    with mngr.sql_ctx as curs:
        if mngr.sql_select_curs(curs, 'Graphs', id=graphid):
            logging.notice("Discarding {:s} graph {!s} (already exists)".format(gen.name, graphid))
            return False
        mngr.sql_insert_curs(
            curs, 'Graphs', id=graphid, generator=gen, file=filename,
            nodes=meta['nodes'], edges=meta['edges'],
            native=native, poisoned=poisoned, seed=encoded(meta.get('seed')),
        )
        os.makedirs(os.path.dirname(filename), exist_ok=True)
        logging.debug("Renaming graph file {!r} to {!r}".format(meta['filename'], filename))
        os.rename(meta['filename'], filename)
        return True
def _compute_all_metrics(mngr: Manager, metr: Metrics, sizes: set, badlog):
    for row in mngr.sql_select('Layouts'):
        graphid = row['graph']
        if _get_graph_size(mngr, graphid) not in sizes:
            continue
        layoutid = row['id']
        layoutfile = row['file']
        if mngr.sql_select('Metrics', layout=layoutid, metric=metr):
            continue
        if badlog.get_bad(Actions.METRICS, layoutid, metr):
            logging.notice("Skipping computation of metric {:s} for layout {!s}".format(
                metr.name, layoutid))
            continue
        logging.info("Computing metric {m:s} for layout {l!s} ...".format(m=metr.name, l=layoutid))
        try:
            _compute_metric(mngr, metr, layoutid, layoutfile)
        except RecoverableError as e:
            badlog.set_bad(Actions.METRICS, layoutid, metr, msg=str(e))
            logging.error("Cannot compute metric {metr:s} for layout {lid!s} of graph {gid!s}: {!s}".format(
                e, metr=metr.name, lid=layoutid, gid=graphid))
def job(self, job):
    if self.queue.full():
        logging.notice("didn't queue job, queue is full")
        return False
    jid = self.nextjid
    info = dict(zip(("url", "pollid", "choice", "count"), job))
    info['jid'] = jid
    info['qtime'] = time()
    info['stime'] = None
    info['etime'] = None
    info['status'] = "queued"
    self.jobs[jid] = info
    self.queue.put(jid, False)
    logging.info("job #%d queued", jid)
    self.nextjid += 1
    return jid
def _merger(self, src, dst, dryRun):
    try:
        if dryRun:
            # Be less verbose for dry runs. More detailed information is
            # likely to be misleading because of dry run limitations.
            logger.notice(lit("INFO_MIGRATE_MOVE_DRYRUN__S") % src)
            return
        root, ext = os.path.splitext(src)
        if ext == ".conf":
            if os.path.lexists(dst):
                # Combine src and dst confs; don't override anything in dst.
                combinedConf = comm.readConfFile(src)
                dstConf = comm.readConfFile(dst)
                for k in dstConf.keys():
                    if combinedConf.has_key(k):
                        combinedConf[k].update(dstConf[k])
                    else:
                        combinedConf[k] = dstConf[k]
                # In case we don't have permission to truncate the
                # file, just remove it preemptively.
                safe_remove(dst)
                logger.notice(lit("INFO_MIGRATE_MERGE_CONF__S_S") % (src, dst))
                comm.writeConfFile(dst, combinedConf)
            else:
                comm.copyItem(src, dst)
        else:
            if os.path.lexists(dst):
                logger.notice(lit("INFO_MIGRATE_IGNORE_DUP__S_S") % (src, dst))
            else:
                comm.copyItem(src, dst)
    except Exception, e:
        logger.warn(lit("WARN_MIGRATE_NO_CREATE__S") % dst)
        logger.exception(e)
def migrate(self, dryRun=False):
    self._verify()
    if not self.is_legacy():
        return
    name = self.name()
    src = self.location()
    cleanup = True
    if (name == self._DEFAULT) or (name == "README"):
        logger.notice(lit("INFO_MIGRATE_OMIT__S") % name)
    elif name == self._LOCAL:
        if not dryRun:
            self._rearrange_conf_files(self._LOCAL)
        comm.mergeDirs(src, get_system_bundle_path(), dryRun, self._merger)
    else:
        if not dryRun:
            self._rearrange_conf_files(self._DEFAULT)
        collision = get_bundle(name)
        if collision is None:
            comm.moveItem(src, make_bundle_install_path(name), dryRun)
            cleanup = False
        else:
            logger.notice(lit("INFO_MIGRATE_COLLISION__S_S") % (
                collision.name(), collision.location()))
            logger.notice(lit("INFO_MIGRATE_OMIT__S") % name)
    if cleanup and not dryRun:
        logger.info(lit("INFO_MIGRATE_CLEANUP__S") % src)
        self.delete()
def chooseLC(self, aspirateVolume=0):
    if self.volume - aspirateVolume >= MINLIQUIDDETECTVOLUME:
        if aspirateVolume == 0:
            return self.inliquidLC  # Not aspirating, should be fine
        # Try using liquid detection
        initheight = self.plate.getliquidheight(self.volume)  # True height at start
        finalheight = self.plate.getliquidheight(self.volume - aspirateVolume)  # True height at end of aspirate
        initgemvolume = self.plate.getgemliquidvolume(initheight)  # How much will Gemini think we have at start
        if initgemvolume < aspirateVolume + 15:
            # Not enough
            msg = "Aspirate %.1f ul from %.1f ul, gem will think initial volume is %.1ful which is too low to reliably work - not using LD" % (
                aspirateVolume, self.volume, initgemvolume)
            logging.notice(msg)
        else:
            finalgemvolume = initgemvolume - aspirateVolume
            finalgemheight = self.plate.getgemliquidheight(finalgemvolume)
            finaltipdepth = self.inliquidLC.submerge - (finalgemheight - finalheight)
            msg = "Aspirate %.1f ul from %.1f ul: height goes from %.1f to %.1f mm, gem will think initial volume is %.1ful and final height %.1f mm" % (
                aspirateVolume, self.volume, initheight, finalheight,
                initgemvolume, finalgemheight)
            if finalgemheight - 0.1 < self.inliquidLC.submerge:
                # Gemini won't be able to submerge as much as requested
                logging.notice(msg + ": Gemini would think there's not enough liquid to submerge %.1f mm - not using LD"
                               % self.inliquidLC.submerge)
            elif finaltipdepth < 0.1:
                # Tracking is off so much that tip will break surface of water during operation
                logging.notice(msg + ": tip will not be submerged enough (depth=%.1f mm) - not using LD"
                               % finaltipdepth)
            else:
                # Should be good
                # logging.notice(msg)
                return self.inliquidLC
    # No liquid detect:
    if self.volume == 0 and aspirateVolume == 0:
        return self.emptyLC
    elif self.hasBeads and self.plate.curloc == "Magnet":
        return self.beadsLC
    else:
        return self.bottomLC
def getmixspeeds(self):
    'Get minimum, maximum speed for mixing this sample'
    if self.isMixed():
        minspeed = 0
    else:
        minspeed = interpolate(self.plate.minspeeds, self.volume)
        if minspeed is None:
            assumeSpeed = 1900
            logging.notice("No shaker min speed data for volume of %.0f ul, assuming %.0f rpm"
                           % (self.volume, assumeSpeed))
            minspeed = assumeSpeed
    maxspeed = interpolate(self.plate.maxspeeds, self.volume)
    glycerol = self.glycerolfrac()
    if glycerol > 0:
        gmaxspeed = interpolate(self.plate.glycerolmaxspeeds, self.volume)
        if glycerol > self.plate.glycerol:
            logging.notice("Sample %s contains %.1f%% Glycerol (more than the tested %.1f%%)"
                           % (self.name, glycerol * 100, self.plate.glycerol * 100))
            maxspeed = gmaxspeed
        else:
            maxspeed = maxspeed + (gmaxspeed - maxspeed) * (glycerol / self.plate.glycerol)
        if maxspeed < minspeed:
            logging.notice("%s with %.1ful and %.1f%% glycerol has minspeed of %.0f greater than maxspeed of %.0f"
                           % (self.name, self.volume, glycerol * 100, minspeed, maxspeed))
            minspeed = maxspeed  # Glycerol presence should also reduce minspeed
    return (minspeed, maxspeed)
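# Worked example of the glycerol interpolation in getmixspeeds() above, with
# assumed numbers: if maxspeed=1600 and gmaxspeed=1000 at the plate's tested
# glycerol fraction of 0.4, a sample at glycerol=0.2 gets
# 1600 + (1000 - 1600) * (0.2 / 0.4) = 1300 rpm, i.e. a linear blend between
# the water and full-glycerol limits.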
def sharding_start(sharding):
    auth = sharding['auth']
    configdb = ['%s:%d' % (i[0], i[1]) for i in sharding['configserver']]
    configdb = ','.join(configdb)

    logging.notice('............. start shard ')
    for shard in sharding['shard']:
        if shard['type'] == 'replset':
            replset_start(shard, auth)
        elif shard['type'] == 'mongod':
            mongod_start(shard['server'], auth=auth)

    logging.notice('............. start configserver ')
    for configserver in sharding['configserver']:
        configserver_start(configserver, auth)

    logging.notice('............. start mongos ')
    for mongos in sharding['mongos']:
        mongos_start(mongos, configdb, auth)

    @common.retry(Exception, tries=2)
    def add_shard(shard):
        if shard['type'] == 'replset':
            members = ['%s:%d' % (host, port)
                       for (id, (host, port, path)) in enumerate(shard['mongod'])]
            members = ','.join(members)
            js = '''
            //use admin;
            sh.addShard( "%s/%s" );
            ''' % (shard['replset_name'], members)
        elif shard['type'] == 'mongod':
            host, port, path = shard['server']
            members = '%s:%d' % (host, port)
            js = '''
            //use admin;
            sh.addShard( "%s" );
            ''' % (members)
        [ip, port, path] = sharding['mongos'][0]
        try:
            _run_js(ip, port, js, auth)
        except Exception as e:
            if str(e).find('E11000 duplicate key error index: config.shards.$_id_') >= 0:
                logging.warning('shard already added !!!')
                return
            if str(e).find('host already used') >= 0:
                logging.warning('shard already added !!!')
                return
            logging.warning('add shard return error with: \n' + str(e))

    for shard in sharding['shard']:
        add_shard(shard)

    _sharding_status(sharding, auth)

    print "please run:"
    print "sh.enableSharding('report')"
    print "sh.shardCollection('report.jomo_report_2013053116', {uuid:1})"
def console_stop(self):
    # we have to import 'logging' not at the top of the module
    import logging

    t = current_thread()
    logging.notice(_(u'{0}: Interactive console terminated by {1} '
        u'from {2}.').format(self.licornd,
            stylize(ST_NAME, t._licorn_remote_user),
            stylize(ST_ADDRESS, '%s:%s' % (
                t._licorn_remote_address, t._licorn_remote_port))),
        to_listener=False)

    if self._console_isatty:
        # NOTE: there are console non-breakable spaces at chosen
        # places in the sentences for enhanced graphical effect.
        remote_output(_(u'Welcome back to Real World™. Have a nice day!')
            + u'\n', word_delay=0.25)
    else:
        remote_output(_(u'>>> batched remote console terminated.') + '\n',
            _message_channel_=2)
def start(self):
    logging.notice("+++ start replset")
    for x in self.shard_arr:
        Replset(x).start()
    logging.notice("+++ start configserver")
    for x in self.configserver_arr:
        Configserver(x).start()
    logging.notice("+++ start mongos")
    for x in self.mongos_arr:
        Mongos(x).start()
    self._adduser()
    logging.notice("+++ add shard")
    for shard in self.args["shard"]:
        self._do_addshard(shard)
    logging.notice("done!!")
    print common.shorten(self._runjs("sh.status()"), 1024)
    print "hint:"
    print "sh.enableSharding('db')"
    print "sh.shardCollection('db.collection', {uuid:1})"
def save(self, filename=None, batch=False, auto_answer=None):
    """ If the configuration file changed, backup the current file on disk,
        and save the current data into a new version (same name).

        If the current instance is a "memory-only" one, and no filename is
        given, raise an exception. """
    if filename is None:
        filename = self.filename

    if self.__changed:
        if filename:
            if batch or logging.ask_for_repair(_(u'{0}: system file {1} '
                    u'must be modified for the configuration to be '
                    u'complete. Do it?').format(
                        stylize(ST_NAME, self._caller),
                        stylize(ST_PATH, self.filename)),
                    auto_answer=auto_answer):
                fsapi.backup_file(filename)
                self.__save(filename)
                # Alter the property via the underlying private attribute,
                # else modifications are not allowed.
                self.__changed = False
                logging.notice(_(u'{0}: altered configuration file {1}.').format(
                    stylize(ST_NAME, self._caller),
                    stylize(ST_PATH, self.filename)))
            else:
                raise exceptions.LicornModuleError(_(u'{0}: configuration '
                    u'file {1} must be altered to continue.').format(
                        self._caller, self.filename))
        else:
            raise exceptions.LicornRuntimeError(_(u'%s: cannot save a '
                u'file without any filename!') % self.name)
def execute(command, input_data='', dry_run=None):
    """ Execute a command (passed as a list or tuple) and roughly pipe some
        data into the executed program. Return the (eventual) stdout and
        stderr in a tuple. """
    assert ltrace(TRACE_PROCESS, 'execute(%s)%s, dry_run=%s.' % (command,
        ' with input_data="%s"' % input_data if input_data != '' else '',
        dry_run))

    from subprocess import Popen, PIPE

    if dry_run:
        logging.notice(_(u'{0:s}: dry_run({1}){2}.').format(
            stylize(ST_NAME, u'execute'),
            stylize(ST_COMMENT, u' '.join(command)),
            _(u' → sleep({0})').format(dry_run)))
        if type(dry_run) in (IntType, LongType, FloatType):
            time.sleep(float(dry_run))
        return ('', '')

    try:
        if input_data != '':
            p = Popen(command, shell=False, stdin=PIPE, stdout=PIPE,
                      stderr=PIPE, close_fds=True)
            return p.communicate(input_data)
        else:
            p = Popen(command, shell=False, stdout=PIPE, stderr=PIPE,
                      close_fds=True)
            return p.communicate()
    except (OSError, IOError), e:
        logging.warning2(_(u'{0}: exception "{1}" while trying to '
            u'process.execute({2}).').format(
                stylize(ST_NAME, current_thread().name),
                stylize(ST_ATTR, e),
                stylize(ST_COMMENT, ' '.join(command))))
        raise
def send(cls, user_key, title, message, severity_level, url=None):
    '''
    :param user_key: str pushover user key
    :param title: str
    :param message: str
    :param severity_level: int
    :param url: str - Default: None
    :returns: boolean
    '''
    if not Config.data['pushover']['enabled']:
        logging.notice('pushover is not enabled in the config.')
        return False

    if severity_level < 3:
        priority = 2
    elif severity_level < 5:
        priority = 1
    else:
        priority = 0

    params = {
        'user': user_key,
        'token': Config.data['pushover']['token'],
        'title': '{}: {}'.format(Config.data['notification_prefix'], title),
        'message': message,
        'priority': priority,
    }
    if url:
        params['url'] = url
    if priority == 2:
        params['retry'] = 60
        params['expire'] = 14400  # 4 hours

    requests.post(
        'https://api.pushover.net/1/messages.json',
        params=params,
    )
    return True
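# Hedged usage sketch for send() above, assuming the enclosing class is
# named Pushover (hypothetical) and the user key is a placeholder. A
# severity of 2 maps to Pushover priority 2 (emergency, with retry/expire).
Pushover.send(user_key='uQiRzpo4DXghDmr9QzzfQu27cmVRsG',
              title='disk failure',
              message='/dev/sda reports SMART errors',
              severity_level=2,
              url='https://example.com/alerts/42')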
def dispose(self, volume, src, mix=False, getDITI=True, dropDITI=True):
    'Dispose of a given volume by aspirating and not dispensing (will go to waste during next wash)'
    if self.ptcrunning and src.plate == decklayout.SAMPLEPLATE:
        self.waitpgm()
    if volume > self.MAXVOLUME:
        reuseTip = False  # Since we need to wash to get rid of it
        msg = "Splitting large transfer of %.1f ul into smaller chunks < %.1f ul " % (volume, self.MAXVOLUME)
        if reuseTip:
            msg += "with tip reuse"
        else:
            msg += "without tip reuse"
        logging.notice(msg)
        self.dispose(self.MAXVOLUME, src, mix, getDITI, dropDITI)
        self.dispose(volume - self.MAXVOLUME, src, False, getDITI, dropDITI)
        return

    cmt = "Remove and dispose of %.1f ul from %s" % (volume, src.name)
    ditivolume = volume + src.inliquidLC.singletag
    if mix and not src.isMixed():
        cmt = cmt + " with src mix"
        ditivolume = max(ditivolume, src.volume)
    if self.useDiTis:
        tipMask = 4
        if getDITI:
            worklist.getDITI(tipMask & self.DITIMASK, ditivolume)
    else:
        tipMask = self.cleantip()
    # print "*", cmt
    worklist.comment(cmt)
    if mix and not src.isMixed():
        src.mix(tipMask)
    src.aspirate(tipMask, volume)
    if self.useDiTis and dropDITI:
        worklist.dropDITI(tipMask & self.DITIMASK, decklayout.WASTE)
def _do_at_all(self, cmd):
    # Dispatch by method name with getattr() rather than eval().
    logging.notice("+++ %s replset" % cmd)
    for x in self.shard_arr:
        getattr(Replset(x), cmd)()
    logging.notice("+++ %s configserver" % cmd)
    for x in self.configserver_arr:
        getattr(Configserver(x), cmd)()
    logging.notice("+++ %s mongos" % cmd)
    for x in self.mongos_arr:
        getattr(Mongos(x), cmd)()
def test_logging():
    logging.debug("this is debug")
    logging.info("this is info")
    logging.notice("this is notice")
    logging.warn("this is warn")
    logging.error("this is error")
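# Many snippets in this collection call logging.notice(), which the standard
# library does not provide (the projects use their own logging modules). A
# minimal sketch of how a custom NOTICE level, sitting between INFO and
# WARNING, could be registered on the stdlib logging module; the level
# number 25 is a common but arbitrary choice.
import logging

NOTICE = 25
logging.addLevelName(NOTICE, 'NOTICE')

def _notice(msg, *args, **kwargs):
    logging.log(NOTICE, msg, *args, **kwargs)

logging.NOTICE = NOTICE
logging.notice = _notice  # monkey-patch so logging.notice(...) works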
def check_file_against_dict(conf_file, defaults, configuration,
                            batch=False, auto_answer=None):
    ''' Check if a file has some configuration directives, and check against
        values if given. If the value is None, only the directive existence
        is tested. '''
    # FIXME: move out configuration and use LMC.configuration
    # FIXME 2: move this method out of here (if it needs LMC,
    # it must not be in foundations)!!
    from licorn.foundations import readers

    conf_file_alter = False
    conf_file_data = open(conf_file, 'r').read()
    conf_file_dict = readers.simple_conf_load_dict(data=conf_file_data)

    for (directive, value) in defaults:
        if not conf_file_dict.has_key(directive):
            logging.warning(_(u'Inserted missing directive {1} in {0}.').format(
                stylize(ST_PATH, conf_file),
                stylize(ST_COMMENT, directive)))
            conf_file_alter = True
            conf_file_dict[directive] = value
            conf_file_data = '%s %s\n%s' % (directive, value, conf_file_data)
        if value is not None and conf_file_dict[directive] != value:
            logging.warning(_(u'Modified {0} directive {1} to be equal to {2} '
                u'(was originally {3}).').format(
                    stylize(ST_PATH, conf_file),
                    stylize(ST_REGEX, directive),
                    stylize(ST_OK, value),
                    stylize(ST_BAD, conf_file_dict[directive])))
            conf_file_alter = True
            conf_file_dict[directive] = value
            conf_file_data = re.sub(r'%s.*' % directive,
                r'%s %s' % (directive, value), conf_file_data)
        # else: everything is OK, just pass.

    if conf_file_alter:
        if batch or logging.ask_for_repair(_(u'Modify {0} on disk to '
                u'reflect current in-memory changes?').format(
                    stylize(ST_PATH, conf_file)), auto_answer):
            try:
                open(conf_file, 'w').write(conf_file_data)
                logging.notice(_(u'Altered {0} to match {1} '
                    u'pre-requisites.').format(
                        stylize(ST_PATH, conf_file),
                        stylize(ST_NAME, configuration.app_name)))
            except (IOError, OSError), e:
                if e.errno == 13:
                    raise exceptions.LicornRuntimeError(_(u'Insufficient '
                        u'permissions. Are you root?\n\t{0}').format(e))
                else:
                    raise e
        else:
            raise exceptions.LicornRuntimeError(_(u'Modifications in {0} are '
                u'mandatory for {1} to work properly. Cannot continue without '
                u'this, sorry!').format(stylize(ST_PATH, conf_file),
                    stylize(ST_NAME, configuration.app_name)))
def __init__(self, name, plate, well=None, conc=None, volume=0, hasBeads=False, extraVol=50,
             mixLC=liquidclass.LCMixBottom, firstWell=None, extrainfo=[], ingredients=None,
             atEnd=False):
    # If firstWell is not None, then it is a hint of the first well position that should be used
    if well is not None and well != -1:
        if not isinstance(well, int):
            well = plate.wellnumber(well)
        if well not in plate.wells:
            logging.warning("Attempt to assign sample %s to well %d (%s) which is not legal on plate %s"
                            % (name, well, plate.wellname(well), plate.name))
        for s in __allsamples:
            if s.well == well and s.plate == plate:
                logging.warning("Attempt to assign sample %s to plate %s, well %s that already contains %s"
                                % (name, str(plate), plate.wellname(well), s.name))
                well = None
                break
    if well is None:
        # Find first unused well
        found = False
        if firstWell is not None:
            # First check only wells >= firstWell
            for well in plate.wells:
                if well < firstWell:
                    continue
                found = True
                for s in __allsamples:
                    if s.plate == plate and s.well == well:
                        well = well + 1
                        found = False
                        break
                if found:
                    break
        if not found:
            well = max(plate.wells) if atEnd else min(plate.wells)
            while (well >= 0) if atEnd else (well <= max(plate.wells)):
                found = True
                for s in __allsamples:
                    if s.plate == plate and s.well == well:
                        well = well + (-1 if atEnd else 1)
                        found = False
                        break
                if found:
                    break
    elif well == -1:
        well = None

    for s in __allsamples:
        if s.plate == plate and s.well == well:
            logging.error("Attempt to assign sample %s to plate %s, well %s that already contains %s"
                          % (name, str(plate), plate.wellname(well), s.name))

    if name in [s.name for s in __allsamples]:
        while name in [s.name for s in __allsamples]:
            name = name + "#"
        logging.notice("renaming sample to %s" % name)

    self.name = name
    self.plate = plate
    if well >= plate.nx * plate.ny:
        logging.error("Overflow of plate %s while adding %s" % (str(plate), name))
    self.well = well
    if isinstance(conc, Concentration) or conc is None:
        self.conc = conc
    else:
        self.conc = Concentration(conc)
    self.volume = volume
    self.initVol = volume
    if volume > 0:
        if ingredients is None:
            self.ingredients = {name: volume}
        else:
            self.ingredients = ingredients
            total = sum([v for v in ingredients.values()])
            for k in self.ingredients:
                self.ingredients[k] = self.ingredients[k] * volume / total
        self.lastvolcheck = None
    else:
        self.ingredients = {}
        # Assume that it has already been checked for 0 (since it can't be any less...)
        self.lastvolcheck = 0

    if plate.pierce:
        self.bottomLC = liquidclass.LCWaterPierce
        self.bottomSideLC = self.bottomLC  # Can't use side with piercing
        self.inliquidLC = self.bottomLC    # Can't use liquid detection when piercing
    else:
        self.bottomLC = liquidclass.LCWaterBottom
        self.bottomSideLC = liquidclass.LCWaterBottomSide
        self.inliquidLC = liquidclass.LCWaterInLiquid
    self.beadsLC = liquidclass.LCWaterBottomBeads
    self.mixLC = mixLC
    self.airLC = liquidclass.LCAir
    # Same as bottom for now
    self.emptyLC = self.bottomLC
    self.history = ""
    __allsamples.append(self)
    if hasBeads:
        self.lastMixed = None
    else:
        # Assume it was last mixed 20 min before start of run
        self.lastMixed = clock.elapsed() - 20 * 60
    self.wellMixed = True
    self.initHasBeads = hasBeads
    self.hasBeads = hasBeads  # Setting this to true overrides the manual conditioning
    self.extraVol = extraVol  # Extra volume to provide
    self.evap = 0             # Amount that has evaporated
    if self.plate.name == "Samples":
        self.lastevapupdate = clock.pipetting
    else:
        self.lastevapupdate = clock.elapsed()
    self.extrainfo = extrainfo
    self.emptied = False