def serialStartListen(self, portnum=None):
    if portnum is None:
        portnum = self.serial_portnum
    self.serial = serial.Serial(portnum)
    # 56K baud?
    self.serial.baudrate = 115200  # actHL: get this right for 115200 bps or 57600
    # 8N1, no handshake, serial port settings
    self.serial.bytesize = 8
    self.serial.setParity('N')
    self.serial.setStopbits(1)
    # timeout is in seconds; even 0.001 seems to be enough for the RZUSB
    # stick, but 20 ms seems to be the most reliable
    self.serial.setTimeout(0.02)
    self.serial.write(self.serial_challenge)
    # wait an arbitrary 3 milliseconds for the uart on the zigbee to respond;
    # probably unnecessary, usually works without it
    time.sleep(0.003)
    text = self.serial.read(1)  # wait for the timeout and try to read one byte
    if text:  # see if anything came back in the first byte
        n = self.serial.inWaiting()  # look if there is more to read
        if n:
            text = text + self.serial.read(n)  # get it
    if len(text) >= len(self.serial_response):
        if text[-len(self.serial_response):] == self.serial_response:
            self.serial.write(self.serial_start_listener)
    # Menu point StartMonitor.
    log.debug("Reading from %s [%s, %s%s%s%s%s]" % (
        self.serial.portstr, self.serial.baudrate, self.serial.bytesize,
        self.serial.parity, self.serial.stopbits,
        self.serial.rtscts and ' RTS/CTS' or '',
        self.serial.xonxoff and ' Xon/Xoff' or ''))
    self.serial.write("\x14")  # HL: 0x14 = 20 = Ctrl-T = DC4 or DCL
    # time.sleep(.05)
    self.serial.write("n" + self.serial_newline)
def sync_dirty_attributes(queue, loop=True):
    _l = queue.qsize()
    if _l > 0:
        if loop:
            _times = min(_l, MAX_SYNC_CNT_PER_LOOP)
        else:
            _times = _l
        i = 0
        while i < _times:
            i += 1
            try:
                attr = queue.get_nowait()
                attr.syncdb()
            except Queue.Empty:
                break
            except:
                pass
        log.info('End sync character to db, total: {0}, dirty attributes length: {1}'.format(
            _times, queue.qsize()))
    if loop:
        reactor.callLater(SYNC_DB_INTERVAL, sync_dirty_attributes, queue)
    else:
        log.debug('End sync db, dirty attributes length {0}, loop:{1}'.format(
            queue.qsize(), loop))
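A minimal bootstrap sketch for the loop above, assuming Twisted's reactor and the constants used in the function; the queue instance name here is an assumption:

# Sketch only: assumes Twisted, Python 2's Queue module, and SYNC_DB_INTERVAL
# defined elsewhere, as in the function above.
import Queue
from twisted.internet import reactor

dirty_queue = Queue.Queue()
reactor.callLater(SYNC_DB_INTERVAL, sync_dirty_attributes, dirty_queue)
reactor.run()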
def __init__(self, *args, **kwds):
    self.fileName = "rev20.log"
    self.serial_newline = '\r'  # not necessary
    self.serial = serial.Serial()
    self.serial_challenge = "\x14\x0D\x0A"
    self.serial_response = "\x0D\x0ARES>"
    self.serial_start_listener = "n\x0D\x0A"  # sniff command for Atmel RZUSB Stick
    self.serial_portnum = None
    numname = serialPortsAvailable.serialPortsAvailable()
    if len(numname) == 1:
        log.debug("Found a serial port that responds as if a Transceive Technology product is attached")
        self.serial_portnum = numname[0][0]
        self.serialStartListen(self.serial_portnum)
    if self.serial_portnum is None:
        log.debug("No serial port was found, so trying the '" + self.fileName + "' file.")
        try:
            # To ensure timely file.close(), the Python 2.5 documentation recommends:
            #     with open("myfile.txt") as f:
            #         for line in f:
            #             print line
            self.fileHandle = open(self.fileName, "r")
        except IOError, (errno, errstr):
            log.error("Unable to find & open input file named '" + self.fileName +
                      "' due to I/O error({0}): {1}".format(errno, errstr))
            self.fileHandle = False
            # actHL: add additional searches within parent directories,
            # subdirectories, and for any *.log text files
        except OSError, (errno, errstr):
            log.error("Unable to find & open input file named '" + self.fileName +
                      "' due to OS error({0}): {1}".format(errno, errstr))
            self.fileHandle = False
def POST(self):
    ''' Method: post '''
    try:
        try:
            data = web.data()
            build_info = utils.json_load_to_str(data)
            log.debug("build info: %s" % build_info)
        except Exception, e:
            log.error(e)
            raise UnableParsePostDataError
        # update status
        dbopts = SQLAdaptor()
        commit_id = dbopts.SetBuildStatus(build_info)
        # if all results are failures, send email to committer
        phase_id, failure_list, commit_id = dbopts.CheckBuildFailures(build_info)
        if failure_list:
            if not opts.SendEmailToCommitor(build_info, commit_id):
                dbopts.SetCommitStatus(commit_id, 10)
            else:
                dbopts.SetCommitStatus(commit_id, 4)
        elif (not failure_list) and (phase_id == 3):
            dbopts.SetCommitStatus(commit_id, 1)
        else:
            dbopts.SetCommitStatus(commit_id, 3)
        return HTTPAdaptor.format_response("ok", "003", "Update status based on CI")
    except Exception, e:
        log.error(e)
        return HTTPAdaptor.format_response("error", "Request processing failed.")
def sync_dirty_attributes(queue, loop=True):
    qsize = queue.qsize()
    if qsize > 0:
        log.info("sync data to db. dirty attrib length:%s." % qsize)
        if loop:
            sync_cnt = min(qsize, MAX_SYNC_CNT_PER_LOOP)
        else:
            sync_cnt = qsize
        i = 0
        while i < sync_cnt:
            i += 1
            try:
                attrib = queue.get_nowait()
                yield attrib.syncdb()
            except Queue.Empty:
                break
            except:
                pass
    if loop:
        reactor.callLater(SYNC_DB_INTERVAL, sync_dirty_attributes, queue)
    else:
        log.debug('End sync db, dirty attributes length {0}, loop:{1}'.format(
            queue.qsize(), loop))
def reload_plugins(irc, init=False):
    plugins_folder = [os.path.join(os.getcwd(), 'plugins')]
    plugins = set(glob.glob(os.path.join("plugins", "*.py")))
    for plugin in plugins:
        _plugin = os.path.join(os.getcwd(), plugin)
        mtime = os.stat(_plugin).st_mtime
        if mtime != mtimes.get(_plugin):
            mtimes[_plugin] = mtime
            try:
                moduleinfo = imp.find_module(plugin.split("/")[1].split(".")[0], plugins_folder)
                pl = imp.load_source(plugin, moduleinfo[1])
            except ImportError as e:
                if str(e).startswith('No module named'):
                    log.error("Failed to load plugin {}: the plugin could not be found.".format(plugin))
                else:
                    log.error("Failed to load plugin {}: import error {}".format(plugin, str(e)))
                if init:
                    sys.exit(1)
            except BaseException as e:
                log.error(e)
            else:
                if hasattr(pl, 'main'):
                    pl.main(irc)
                    log.debug("Calling main() function of plugin {}".format(pl))
                try:
                    if pl.name in utils.plugins.keys():
                        del utils.plugins[pl.name]
                    utils.plugins[pl.name] = pl.cmds
                    irc.state["plugins"][pl.name] = {}
                except AttributeError:
                    pass
                log.info("(Re)Loaded {}".format(_plugin))
def coordinates(self, ws_name):
    """Extract coordinates and ID's from a list of worksheet-cells.

    Parameters
    ----------
    ws_name : string
        The name of the worksheet to process.

    Returns
    -------
    out : np.ndarray
        A numpy ndarray of shape (N,3) containing 3-tuples (floats)
        using the ID as index, representing the coordinates in
        (x, y, z) order.
    """
    coords = []
    # make sure the cells were already parsed:
    if ws_name not in self.cells:
        self._parse_cells(ws_name)
    # extract positions and ID:
    for cell in self.cells[ws_name]:
        idx = int(cell[7])
        x = float(cell[0])
        y = float(cell[1])
        z = float(cell[2])
        coords.insert(idx, (x, y, z))
    log.debug("Parsed coordinates: %i" % len(coords))
    return np.array(coords)
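A short usage sketch for coordinates(); the reader class and worksheet name are hypothetical, the method only requires an object exposing self.cells and self._parse_cells:

# Hypothetical usage -- WorksheetReader and "Position" are illustrative names.
reader = WorksheetReader("spots.xls")
coords = reader.coordinates("Position")   # ndarray of shape (N, 3)
print(coords.shape)                       # e.g. (120, 3)
print(coords[0])                          # (x, y, z) of the cell with ID 0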
def test(r):
    if isinstance(r, basestring):
        r = resource.load(r)
    log.debug('Trying {}'.format(r.name))
    script_path = os.path.join(r.db_obj.base_path, 'test.py')
    if not os.path.exists(script_path):
        log.warning('resource {} has no tests'.format(r.name))
        return {}
    log.debug('File {} found'.format(script_path))
    with open(script_path) as f:
        module = imp.load_module(
            '{}_test'.format(r.name), f, script_path, ('', 'r', imp.PY_SOURCE))
    try:
        module.test(r)
        return {
            r.name: {
                'status': 'ok',
            },
        }
    except Exception:
        return {
            r.name: {
                'status': 'error',
                'message': traceback.format_exc(),
            },
        }
def reload_plugins(init=False):
    plugins_folder = [os.path.join(os.getcwd(), 'plugins')]
    plugins = set(glob.glob(os.path.join("plugins", "*.py")))
    for plugin in plugins:
        _plugin = os.path.join(os.getcwd(), plugin)
        mtime = os.stat(_plugin).st_mtime
        if mtime != mtimes.get(_plugin):
            mtimes[_plugin] = mtime
            try:
                moduleinfo = imp.find_module(plugin.split("/")[1].split(".")[0], plugins_folder)
                pl = imp.load_source(plugin, moduleinfo[1])
            except ImportError as e:
                if str(e).startswith('No module named'):
                    log.error('Failed to load plugin %r: the plugin could not be found.', plugin)
                else:
                    log.error('Failed to load plugin %r: import error %s', plugin, str(e))
                if init:
                    sys.exit(1)
            except BaseException as e:
                log.error(e)
            else:
                if hasattr(pl, 'main'):
                    for server in utils.connections.values():
                        pl.main(server)
                        log.debug('%r Calling main() function of plugin %r', server.netname, pl)
                log.info("(Re)Loaded %s", _plugin)
def call(self, func, args=None):
    self.resetTimeout()
    _d = defer.Deferred()
    if self.transport and func and self.deferreds is not None:
        obj = (func, args)
        data = dumps(obj)
        body_length = len(data)
        self.seq = inc_seq(self.seq)
        self.deferreds[self.seq] = _d
        _header = struct.pack(self.HEADER_FORMAT_L, TCP_REQ, self.seq, body_length)
        try:
            self.transport.write(_header + data)
            log.debug("[ CALL ]:ar_id:%d, func:%s, body_length:%d, to:%s" % (
                self.seq, func, body_length, self.transport.getPeer()))
        except:
            self.transport.loseConnection()
            _d.errback(Exception("call failed"))
    else:
        log.warn("[ CALL ]:unknown args client:%s or func:%s or deferreds:%s." % (
            self.transport.getPeer(), func, self.deferreds))
        _d.errback(Exception("call failed"))
    self.resetTimeout()
    return _d
def world_ranklist(p, req):
    if hasattr(p, "uid"):
        log.debug('uid:{0}'.format(p.uid))
        uid = p.uid
    else:  # used to test
        log.error('client has not found uid.')
        defer.returnValue((CONNECTION_LOSE, None))
    user = g_UserMgr.getUserByUid(uid)
    if not user:
        defer.returnValue((CONNECTION_LOSE, None))
    data = list()
    other_data = list()
    weight_data = yield redis.zrange(SET_RANK_PVP_WEIGHT, 0, 10, True)
    for _rank, (_uid, _weight) in enumerate(weight_data):
        _machine_code = yield redis.hget(HASH_UID_MACHINE_CODE, _uid)
        other_data.append((_rank + 1, _uid, _machine_code, _weight))
    self_rank = yield redis.zrank(SET_RANK_PVP_WEIGHT, uid)
    self_rank = 0 if self_rank is None else int(self_rank) + 1
    self_machine_code = yield redis.hget(HASH_UID_MACHINE_CODE, uid)
    self_weight = yield redis.zscore(SET_RANK_PVP_WEIGHT, uid)
    self_weight = 0 if self_weight is None else abs(self_weight)
    data = (other_data, (self_rank, uid, self_machine_code, self_weight))
    defer.returnValue((NO_ERROR, data))
def loginUser(self, char_data, flag=False):
    '''
    @param: char_data=[id, account, nick_name, ...]
    @param: flag - marks an offline login
    '''
    from manager.gscharacter import GSCharacter
    char_data = dict(zip(GSCharacter._fields, char_data))
    cid = char_data['id']
    user = self.getUser(cid, flag)
    if user:
        log.error('user already exists. cid {0}.'.format(cid))
        return user
    user = GSCharacter(cid)
    self.__dic_uid_user[cid] = user
    user.load(char_data, flag)  # flag marks an offline login
    if flag:
        user.offline_num += 1
    log.debug('load user data. cid: {0}'.format(cid))
    return user
def InsertJobInfo(self, job_info):
    log.debug("Insert job info into database")
    try:
        curtime = datetime.utcnow()
        conn = self._init_connection()
        cursor = conn.cursor()
        cursor.execute("SET AUTOCOMMIT = 0")
        for jobname, jobcontent in job_info.iteritems():
            cursor.execute(SQL_SELECT_JOB_ID, (jobname, ))
            for row in cursor:
                job_id = row[0]
            tnx = jobcontent["transaction"]
            if tnx:
                cursor.execute(SQL_SELECT_COMMIT_ID_BY_TRANSACTION, (tnx, ))
            else:
                cursor.execute(SQL_SELECT_COMMIT_ID_BY_REVISION, (jobcontent["revision"], ))
            for row in cursor:
                commit_id = row[0]
            cursor.execute(SQL_INSERT_BUILD_INFO, (job_id, commit_id, curtime))
        conn.commit()
    except Exception, e:
        log.error('Insert job info into database failed')
        log.error(e)
        conn.rollback()
def InsertCommitInfo(self, commit_info):
    log.debug("Insert commit info into database")
    try:
        curtime = datetime.utcnow()
        conn = self._init_connection()
        cursor = conn.cursor()
        cursor.execute("SET AUTOCOMMIT = 0")
        cursor.execute(SQL_INSERT_COMMIT_INFO, (commit_info["author"],
                                                commit_info["transaction"],
                                                commit_info["revision"],
                                                commit_info["uuid"],
                                                commit_info["comments"],
                                                7, curtime))
        commit_id = cursor.lastrowid
        for change in commit_info["changes"]:
            cursor.execute(SQL_INSERT_CHANGESET_INFO, (
                commit_id,
                change["type"],
                change["filename"] if change.has_key("filename") else "",
                change["fileurl"] if change.has_key("fileurl") else "",
                change["diffname"] if change.has_key("diffname") else "",
                change["diffpath"] if change.has_key("diffpath") else "",
                curtime))
        conn.commit()
    except Exception, e:
        log.error('Insert commit info into database failed')
        log.error(e)
        conn.rollback()
def processPackets(self):
    log.debug("Starting processPackets()")
    i = 0
    while self and not self.done:  # alive: what about an isRunning() member function for apps?
        # log.debug("Sleeping for half a second " + str(i))
        # don't process more than one packet a second and keep checking
        # self.done while waiting
        for j in range(10):
            if self.done:
                return
            time.sleep(0.05)
        if not self.pause and not self.done:
            log.debug("processing packet " + str(i))
            i += 1
            p = self.data.getPacket()
            if p != None and p.t > 0 and p.neighbors[0][1] > 0 and self and not self.done:
                self.model.Add(p)  # so model is updated directly
                # now need to update view
                pos = self.model.GetPositions()
                while self.view.panel1.Moving and not self.done:
                    time.sleep(0.01)  # wait 10 milliseconds for user to stop dragging objects around
                self.view.panel1.Moving = True  # so mouse tasks can't move
                pub.sendMessage("REFRESH")
                # self.view.SetPositions(pos)
                # log.debug("Mesh object:\n" + str(self.model))
                pub.sendMessage("DISTANCE")
                # d = self.model.GetDistances()
                # log.debug("Distances retrieved from Mesh:\n" + str(d))
                # self.view.SetDistances(d)
                # nothing prevents this from interfering with other tasks
                self.view.panel1.Moving = False  # resume allowing user to drag objects around
def findUSBMedia(showAll=False):
    """Scan attached USB devices for installation media.

    If showAll is True, all CD devices are returned even if they do not
    contain any installation media.
    """
    diskSet = devices.DiskSet(forceReprobe=True)
    retval = []
    util.umount(USB_MEDIA_PATH)
    for disk in diskSet.values():
        if disk.driverName != devices.DiskDev.DRIVER_USB_STORAGE:
            log.debug("skipping non-usb device -- %s" % disk.name)
            continue
        diskMedia = []
        for part in disk.partitions:
            if not part.consoleDevicePath:
                continue
            diskMedia += _tryDevice(disk, part)
        if not diskMedia and showAll:
            # No installation media was found on the device, but the caller
            # wants the list of potential media devices, so add that in.
            diskMedia = [media.MediaDescriptor(disk.name)]
        retval += diskMedia
    return retval
def __init__(self, objects=[]):
    # unfortunately can't use len(objects) because objects passed can include
    # 0, 1, 2, 3 or more new nodes for the mesh to keep up with
    self.N = 0
    log.debug("Mesh__init__ with zero radios")
    # 32 would be a 32x32 distances matrix with ~1000 elements, probably the
    # limit of what's practical for this brute-force record-keeping method
    self.MAX_NODES = 10
    # Nx1 empty 1-D array for network addresses of the radios/nodes
    self.addresses = np.empty((self.N), dtype=int)
    # Nx3 empty numpy array for 3-D radio node positions
    self.positions = np.empty((self.N, 3), dtype=float)
    # empty NxN numpy matrix of distances implied by the positions
    self.p2d = np.empty((self.N, self.N), dtype=float)
    # empty NxN numpy array for measured distances between nodes
    self.distances = np.empty((self.N, self.N))
    # self.distances = DistanceMatrix((self.N, self.N), dtype=float)
    # portion of the previous range information to retain when adding in new
    # range info; smaller = higher bandwidth and faster/noisier/jumpier tracking
    self.alpha = 0.8
    self.beta = 1 - self.alpha  # portion of new range info to add in
    self.e = None
    # print "New Mesh"
    for o in objects:
        self.Add(o)  # type checking and sorting within Add() attribute function
    log.debug("Mesh__init__ completed with " + str(self.N) + " radios")
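The alpha/beta pair above is a standard exponential-smoothing split; a minimal sketch of the update it implies (the helper name is illustrative, not part of the class):

def smoothed_range(old_estimate, new_measurement, alpha=0.8):
    # alpha keeps the old estimate; beta = 1 - alpha admits the new reading.
    # A smaller alpha tracks faster but is noisier/jumpier.
    return alpha * old_estimate + (1 - alpha) * new_measurement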
def upload_file(self, albumid, filename, caption=None):
    data = filename_get_data(filename)
    # prep HTTP PUT to upload image
    h = httplib.HTTPConnection("upload.smugmug.com")
    h.connect()
    h.putrequest('PUT', "/" + filename)
    h.putheader('Content-Length', str(len(data)))
    h.putheader('Content-MD5', hashlib.md5(data).hexdigest())
    h.putheader('X-Smug-SessionID', self.session)
    h.putheader('X-Smug-Version', '1.2.1')
    h.putheader('X-Smug-ResponseType', 'Xml-RPC')
    h.putheader('X-Smug-AlbumID', str(albumid))
    h.putheader('X-Smug-FileName', filename)
    if caption:
        h.putheader('X-Smug-Caption', caption)
    h.endheaders()
    h.send(data)
    # response output
    resp = h.getresponse()
    log.debug("%s: %s", resp.status, resp.reason)
    result = resp.read()
    h.close()
    log.debug("PUT: result: %s", result)
async def call(self, func, args=None):
    _f = asyncio.Future()
    if self.transport and func:
        obj = (func, args)
        data = dumps(obj)
        body_length = len(data)
        self.seq = inc_seq(self.seq)
        self.__futures[self.seq] = _f
        _header = struct.pack(self.HEADER_FORMAT_L, TCP_REQ, self.seq, body_length)
        try:
            self.transport.write(_header + data)
            asyncio.get_event_loop().call_later(self.TIMEOUT, self.__callTimeout, obj)
            log.debug("[ CALL ]:ar_id:{0}, func:{1}, body_length:{2}, to:{3}".format(
                self.seq, func, body_length, self.__peer))
        except Exception as e:
            self.transport.abort()
            _f.set_exception(e)
    else:
        log.warn("[ CALL ]:unknown args client:{0} or func:{1}.".format(self.__peer, func))
        _f.set_exception(Exception("call failed"))
    self.resetTimeout()
    return (await _f)
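Both call() variants above write a fixed header followed by a serialized body. A receiving-side sketch, assuming HEADER_FORMAT_L packs (msg_type, seq, body_length) as three unsigned 32-bit integers; the real format constant is not shown in these snippets:

import struct

HEADER_FORMAT_L = "!III"          # assumption: (msg_type, seq, body_length)
HEADER_SIZE = struct.calcsize(HEADER_FORMAT_L)

def pop_frame(buf):
    """Split one complete frame off the front of buf, or return None."""
    if len(buf) < HEADER_SIZE:
        return None
    msg_type, seq, body_length = struct.unpack(HEADER_FORMAT_L, buf[:HEADER_SIZE])
    if len(buf) < HEADER_SIZE + body_length:
        return None  # body not fully received yet
    body = buf[HEADER_SIZE:HEADER_SIZE + body_length]
    return (msg_type, seq, body), buf[HEADER_SIZE + body_length:]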
def getOrderedDrives(self, allowUserOverride=True):
    '''Return a list of drives. The order will be the order that the BIOS
    puts them in, unless the user has specified a particular device to go
    first. This is primarily used to set up GRUB.

    TODO: the scripted install "driveOrder" command only affects the order
    of at most one device. This is how I understand it should work. If
    that's the case, maybe we need to change the name from "driveOrder" to
    something else.
    '''
    allDrives = self.disks.values()
    comparator = operator.attrgetter('biosBootOrder')
    allDrives.sort(key=comparator)
    # XXX - remove this at some point since mixing userchoices here is bad.
    if allowUserOverride:
        bootOptions = userchoices.getBoot()
        if bootOptions:
            driveOrder = bootOptions['driveOrder']
            if driveOrder:
                firstDrive = driveOrder[0]
                if firstDrive not in allDrives:
                    raise InvalidDriveOrder(firstDrive)
                allDrives.remove(firstDrive)
                allDrives.insert(0, firstDrive)
            else:
                log.debug("No drive order specified. Set to default.")
        else:
            log.debug("Drive order set to default.")
    return allDrives
def discoverLocation(subDict):
    '''Discover where grub is installed, either on the MBR or the first
    sector of the /boot partition.
    '''
    wholeDiskDev, _partNum = partition.splitPath(subDict['devPartName'])
    diskFile = open(wholeDiskDev)
    mbrContents = diskFile.read(512)
    diskFile.close()
    partFile = open(subDict['devPartName'])
    partBootContents = partFile.read(512)
    partFile.close()
    loc = None
    doNotInstall = False
    if _sectorHasGrub(partBootContents):
        log.debug("found grub installed on the /boot partition")
        loc = userchoices.BOOT_LOC_PARTITION
    elif _sectorHasGrub(mbrContents):
        log.debug("found grub installed on the MBR")
        loc = userchoices.BOOT_LOC_MBR
    else:
        log.warn("grub was not found, not upgrading boot loader")
        doNotInstall = True
    userchoices.setBoot(True, doNotInstall, location=loc)
def hostAction(context):
    subDict = getStringSubstitutionDict()
    log.debug('Dumping bootloader variables...')
    safeDict = subDict.copy()
    del safeDict['passwordLine']
    log.debug(str(safeDict))
    makeBackups()
    if userchoices.getUpgrade():
        discoverLocation(subDict)
        subDict = getStringSubstitutionDict()
    context.cb.pushStatusGroup(5)
    context.cb.pushStatus('Copying the GRUB images')
    copyGrubStageImages()
    context.cb.popStatus()
    context.cb.pushStatus('Writing the GRUB config files')
    writeGrubConfFiles(subDict)
    context.cb.popStatus()
    context.cb.pushStatus('Making the initial ramdisk')
    makeInitialRamdisk(subDict)
    context.cb.popStatus()
    context.cb.pushStatus('Writing GRUB to the Master Boot Record')
    w = MBRWriter()
    w.write(subDict)
    context.cb.popStatus()
    context.cb.popStatusGroup()
    sanityCheck(subDict)
def _execute(command, resources):
    args = command.split()
    executable = args[0]
    tmp_file = None
    try:
        prefix = 'resources' + os.path.sep
        if executable.startswith(prefix):
            log.debug('%s in "%s" is a resource', executable, command)
            tmp_file = tempfile.NamedTemporaryFile(prefix='sq-', suffix='-cmd', delete=False)
            script = resources[executable[len(prefix):]]()
            tmp_file.write(script)
            stat_result = os.fstat(tmp_file.fileno())
            new_mode = stat_result.st_mode | stat.S_IXUSR
            os.fchmod(tmp_file.fileno(), new_mode)
            tmp_file.close()
            args[0] = tmp_file.name
        log.debug('Executing %s in dir %s', args, os.getcwd())
        subprocess.check_call(' '.join(args), shell=True)
    finally:
        if tmp_file:
            os.remove(tmp_file.name)
def cloneDir(src, dst):
    log.debug("cloning dir %s -> %s" % (src, dst))
    if not os.path.exists(dst):
        os.makedirs(dst)
    st = os.stat(src)
    shutil.copystat(src, dst)
    os.chown(dst, st[stat.ST_UID], st[stat.ST_GID])
def __init_schedule_jobs():
    """Init scheduled jobs in fact"""
    log.debug("init scheduled jobs......")
    sche = RequiredFeature("scheduler")
    hackathon_manager = RequiredFeature("hackathon_manager")
    host_server_manager = RequiredFeature("docker_host_manager")

    # schedule job to check recycle operation
    next_run_time = util.get_now() + timedelta(seconds=10)
    sche.add_interval(feature="expr_manager",
                      method="scheduler_recycle_expr",
                      id="scheduler_recycle_expr",
                      next_run_time=next_run_time,
                      minutes=10)

    # schedule job to pre-allocate environment
    hackathon_manager.schedule_pre_allocate_expr_job()

    # schedule job to pull docker images automatically
    if not safe_get_config("docker.alauda.enabled", False):
        docker = RequiredFeature("hosted_docker")
        docker.ensure_images()

    # schedule job to pre-create a docker host server VM
    host_server_manager.schedule_pre_allocate_host_server_job()
def grubDiskAndPartitionIndicies(part):
    '''Grub identifies disks and partitions by their indices as they are
    found in the boot order. It has no knowledge of sdX versus hdX versus
    CCISS versus DAC960. It just knows the first, second, third, etc. and
    expects them to be indicated in its conf files in a format like (hd0,0).
    So this function, for a given partition, gets the index of the disk and
    the index of the partition on said disk.

    Raise a KeyError if it can not be found in the DiskSet.
    '''
    disks = devices.DiskSet()
    orderedDrives = disks.getOrderedDrives()
    for diskIndex, drive in enumerate(orderedDrives):
        for candidatePart in drive.partitions:
            if candidatePart == part:
                # logical partitions ALWAYS start at 4, so use partitionId
                grubPartitionNum = candidatePart.partitionId - 1
                log.debug('Disk/partition %s/%s enumerated as %d,%d '
                          'for grub' % (drive.name, candidatePart.name,
                                        diskIndex, grubPartitionNum))
                return (diskIndex, grubPartitionNum)
    nonStandardDisk = disks.findDiskContainingPartition(part)
    if nonStandardDisk:
        # XXX this should disappear...
        # logical partitions ALWAYS start at 4, so use partitionId
        grubPartitionNum = part.partitionId - 1
        log.debug('Non-standard disk/partition 0/%d' % (grubPartitionNum))
        return (0, grubPartitionNum)
    partName = part.getName()
    raise KeyError('Partition not found in disk set: %s' % partName)
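For reference, the returned pair maps onto grub's conf-file notation like so (a trivial sketch, not part of the original module):

def grubDeviceString(diskIndex, grubPartitionNum):
    # (0, 0) -> "(hd0,0)", the format grub expects in its conf files.
    return "(hd%d,%d)" % (diskIndex, grubPartitionNum)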
def shortest_path_and_len(self, src, dst):
    path = dijkstra_path(self.g, src, dst, 'weight')
    path_len = self.path_len(path)
    if path_len >= max_int:
        return None, None
    log.debug('path_len: %s, path: %s' % (path_len, path))
    return path, path_len
def delete(self, primary_key=None, where=None):
    '''
    @where : when primary_key=None, where format like: {'cid':1}
    '''
    yield self.load(need_value=False)
    if not self._multirow:
        log.warn('[ %s ]Deleted. %s.' % (self.__class__, self.dict_attribs.value))
        self.dict_attribs.delete()
    else:
        if primary_key:
            _attr = self.dict_attribs.pop(primary_key, None)
            if _attr:
                log.debug('[ %s.delete ]deleted, id:%s, data:%s' % (
                    self.__class__, primary_key, _attr.value))
                _attr.delete()
                _attr.syncdb()
        elif where:
            _found = False
            for _key, _attr in self.dict_attribs.items():
                eq = 0
                for where_k, where_v in where.iteritems():
                    _v = getattr(_attr, where_k, None)
                    if _v == where_v:
                        eq += 1
                if eq >= len(where):
                    log.debug('[ %s.delete ]deleted, id:%s, where:%s, data:%s' % (
                        self.__class__, _key, where, _attr.value))
                    del self.dict_attribs[_key]
                    _attr.delete()
                    _attr.syncdb()
def GetTestSummary(self, jobs):
    '''Query from test result database'''
    log.debug("Get test summary")
    try:
        conn = self._init_connection(config.database_test_result)
        cursor = conn.cursor()
        test_num = 0
        failures = 0
        tests = []
        for job in jobs:
            cursor.execute(SQL_SELECT_TEST_NUM, (job[0], job[1]))
            for row in cursor:
                tests.append(row[0])
            # test_num = tests.__len__()
            for test in tests:
                cursor.execute(SQL_SELECT_TEST_RESULT_ID, (job[0], job[1], test))
                for row in cursor:
                    test_num += 1
                    if row[0] == 2:
                        failures += 1
    except Exception, e:
        log.error("Get test summary failed")
        log.error(e)
def handle_pull_request(request):
    with handle_pull_request_lock:
        data = json.loads(request.body.decode("utf-8"))
        pr_data = data["pull_request"]
        repo = pr_data["base"]["repo"]["full_name"]
        if repo not in config.repos:
            log.warning("ignoring PR for repo %s", repo)
            return
        # print(json.dumps(data, sort_keys=False, indent=4))
        action = data["action"]
        pr_url = pr_data["_links"]["html"]["href"]
        log.info("PR %s hook action %s", pr_url, action)
        if action not in known_actions:
            log.warning("PR %s unknown action %s", pr_url, action)
            log.debug(json.dumps(data, sort_keys=False, indent=4))
        if action in {"closed"}:
            PullRequest.close(pr_data)
            return
        pr = PullRequest.get(pr_data).update()
        if action == "unlabeled":
            pr.remove_label(data["label"]["name"])
        elif action == "labeled":
            pr.add_label(data["label"]["name"])
        elif action in {"created", "opened", "reopened"} and config.ci_ready_label not in pr.labels:
            status = {
                "description": "\"%s\" label not set" % config.ci_ready_label,
                "target_url": config.http_root,
            }
            pr.set_status(pr_data["head"]["sha"], **status)
def replace_version(module, pom):
    """
    Replace version in dependency and plugin part.
    :param module: module name
    :param pom: pom file path
    """
    log.debug('Replacing version in file: {}'.format(pom))
    pom_dict = config[module][pom]
    if VERSION_UPDATE_ITEMS not in pom_dict:
        log.warn('No config key {} in pom parameters.'.format(VERSION_UPDATE_ITEMS))
        return
    version_update_items = pom_dict[VERSION_UPDATE_ITEMS]
    log.debug('Module: {}, versions: {}'.format(module, get_str(version_update_items)))
    with in_place.InPlace(pom) as file:
        line_num = 0
        for line in file:
            line_num = line_num + 1
            for version_update_item in version_update_items:
                if version_update_item.id in line:
                    # update version in dependency part
                    if X_VERSION_UPDATE in line:
                        old_version = line[(line.index('<version>') + 9):line.index('</version>')]
                        if old_version != version_update_item.new_version:
                            new_line = line.replace(old_version, version_update_item.new_version)
                            log.debug('Updating version of dependency in line {}'.format(line_num))
                            log.debug('  old_line = {}.'.format(line.strip('\n')))
                            log.debug('  new_line = {}.'.format(new_line.strip('\n')))
                            line = new_line
                        else:
                            log.warn('The same with new version in dependency part.')
                    # update version in plugin part
                    elif X_INCLUDE_UPDATE in line:
                        old_version = line[(line.index('[') + 1):line.index(']')]
                        if old_version != version_update_item.new_version:
                            new_line = line.replace(old_version, version_update_item.new_version)
                            log.debug('Updating line {}'.format(line_num))
                            log.debug('  old_line = {}.'.format(line.strip('\n')))
                            log.debug('  new_line = {}.'.format(new_line.strip('\n')))
                            line = new_line
                        else:
                            log.warn('The same with new version in plugin part.')
            file.write(line)
def update_dependency_dict(dependency_dict, root_pom_id):
    root_pom_info = root_pom_id.split(';')
    root_pom_group_artifact = root_pom_info[0]
    root_pom_group_info = root_pom_group_artifact.split(':')
    root_pom_group_id = root_pom_group_info[0]
    root_pom_artifact_id = root_pom_group_info[1]
    root_pom_version = root_pom_info[1]
    dependency_dict[root_pom_group_id + ':' + root_pom_artifact_id] = root_pom_version
    root_pom = Pom(root_pom_group_id, root_pom_artifact_id, root_pom_version, 1)
    q = queue.Queue()
    q.put(root_pom)
    pom_count = 1
    log.info('Added root pom.depth = {}, url = {}.'.format(root_pom.depth, root_pom.to_url()))
    while not q.empty():
        pom = q.get()
        pom_url = pom.to_url()
        log.info('Get dependencies from pom. depth = {}, url = {}.'.format(pom.depth, pom_url))
        try:
            tree = elementTree.ElementTree(file=request.urlopen(pom_url))
        except HTTPError:
            log.warn('Error in open {}'.format(pom_url))
            continue
        project_element = tree.getroot()
        property_dict = {}
        parent_element = project_element.find('./maven:parent', MAVEN_NAME_SPACE)
        if parent_element is not None:
            # get properties from parent
            parent_group_id = parent_element.find('./maven:groupId', MAVEN_NAME_SPACE).text.strip(' ${}')
            parent_artifact_id = parent_element.find('./maven:artifactId', MAVEN_NAME_SPACE).text.strip(' ${}')
            parent_version = parent_element.find('./maven:version', MAVEN_NAME_SPACE).text.strip(' ${}')
            parent_pom = Pom(parent_group_id, parent_artifact_id, parent_version, pom.depth + 1)
            parent_pom_url = parent_pom.to_url()
            parent_tree = elementTree.ElementTree(file=request.urlopen(parent_pom_url))
            parent_project_element = parent_tree.getroot()
            log.debug('Get properties from parent pom. parent_pom_url = {}.'.format(parent_pom_url))
            update_property_dict(parent_project_element, property_dict)
        update_property_dict(project_element, property_dict)
        # get dependencies
        dependency_elements = project_element.findall(
            './maven:dependencyManagement/maven:dependencies/maven:dependency',
            MAVEN_NAME_SPACE)
        for dependency_element in dependency_elements:
            group_id = dependency_element.find('./maven:groupId', MAVEN_NAME_SPACE).text.strip(' ${}')
            # some group_id contain 'project.groupId', so put project_version first.
            if group_id in property_dict:
                group_id = property_dict[group_id]
            artifact_id = dependency_element.find('./maven:artifactId', MAVEN_NAME_SPACE).text.strip(' ')
            version = dependency_element.find('./maven:version', MAVEN_NAME_SPACE).text.strip(' ${}')
            key = group_id + ':' + artifact_id
            if version in property_dict:
                version = property_dict[version]
            if key not in dependency_dict:
                dependency_dict[key] = version
                log.debug('Dependency version added. key = {}, value = {}'.format(key, version))
            elif version != dependency_dict[key]:
                log.info('Dependency version skipped. key = {}, version = {}, '
                         'dependency_dict[key] = {}.'.format(key, version, dependency_dict[key]))
            artifact_type = dependency_element.find('./maven:type', MAVEN_NAME_SPACE)
            artifact_scope = dependency_element.find('./maven:scope', MAVEN_NAME_SPACE)
            if artifact_type is not None and \
               artifact_scope is not None and \
               artifact_type.text.strip() == 'pom' and \
               artifact_scope.text.strip() == 'import':
                new_pom = Pom(group_id, artifact_id, version, pom.depth + 1)
                q.put(new_pom)
                pom_count = pom_count + 1
    log.info('Root pom summary. pom_count = {}, root_pom_url = {}'.format(pom_count, root_pom.to_url()))
def dumpExceptionInfo(ex):
    log.debug('An exceptional situation was encountered.'
              ' Weasel does not know how to handle this.'
              ' Terminating.')
    log.debug('The class of the exception was: %s' % str(ex.__class__))
    log.debug('The exception was: %s' % str(ex))
    log.debug('Dumping userchoices')
    log.debug(userchoices.dumpToString())
    log.debug('\n************* UNHANDLED WEASEL EXCEPTION **************')
    log.debug(traceback.format_exc())
    log.debug('**************************************************\n')
        client.session.save()
        break

    transfer.post_init()
    await runner.setup()
    await web.TCPSite(runner, host, port).start()

async def stop() -> None:
    await runner.cleanup()
    await client.disconnect()

try:
    loop.run_until_complete(start())
except Exception:
    log.fatal("Failed to initialize", exc_info=True)
    sys.exit(2)

log.info("Initialization complete")
log.debug(f"Listening at http://{host}:{port}")
log.debug(f"Public URL prefix is {public_url}")

try:
    loop.run_forever()
except KeyboardInterrupt:
    loop.run_until_complete(stop())
except Exception:
    log.fatal("Fatal error in event loop", exc_info=True)
    sys.exit(3)
def updateProgram():
    try:
        if os.path.exists(UPDATE_FILE):
            os.remove(UPDATE_FILE)
    except PermissionError:
        log.error("Cannot remove installer exe, must be open still")
        return False
    try:  # Get our version so we see if we need to update
        with open("version.txt") as file:
            versionCurrent = file.read()
        log.debug("Current Version:", versionCurrent)
    except:
        versionCurrent = None
        log.warning("Version file not found")
    try:
        log.info("Beginning update check")
        with urlopen(UPDATE_LINK) as response:
            updateData = json.loads(response.read().decode("utf-8"))
        newVersion = updateData["tag_name"]
        log.debug("Good data received")
        log.debug("Most Recent:", newVersion, "| Our Version:", versionCurrent)
        if newVersion != versionCurrent:  # The tag should be the released version
            if questionBox("Version " + newVersion + " now available! Would you like to update?",
                           title="Update"):
                # After this point, we want another exception handler that will stop
                # the program with error, because the user expects a download to be happening
                try:
                    log.info("Updating to version", newVersion)
                    fileData = updateData["assets"][0]
                    webAddress = fileData["browser_download_url"]  # used to be 'fileData["name"]'
                    with urlopen(webAddress) as webfile, open(UPDATE_FILE, "wb") as file:
                        progress = FileDLProgressBar("Downloading new update")
                        progress.start()
                        log.debug("Downloading new file from", webAddress)
                        # Both file and webfile are automatically buffered, so this is fine to do
                        copyfileobj(webfile, file)
                        progress.close()
                    subprocess.Popen(UPDATE_FILE)  # Call this file and then exit the program
                except IndexError:
                    # No binary attached to release -- no assets (probably).
                    # In future we might check updates before this one, to ensure
                    # we are somewhat updated
                    log.error("No binary attached to most recent release!")
                except BaseException as e:
                    # BaseException because return statement in finally stops
                    # anything from getting out
                    log.error("Error in downloading new update!", exc_info=e)
                finally:
                    return True  # Notice: This stops any error propagation for other errors
            else:
                log.info("User declined update")
        else:
            log.info("We have the most recent version")
    except URLError:
        log.warning("Not connected to the internet!")
        errorBox("Not connected to the internet!", title="Fatal Error")
        raise RuntimeError("No internet")
    except Exception as e:
        # Log the error. We still want them to run the program if update was not successful
        log.error("Error in update!", exc_info=e)
    # If we did not return in the function, we did not update properly
    return False
        events.Fire("MouseMove", event)

    # Draw Everything
    parkPanel.DoGraphics(screen, pygame.display, timeChange)
    if allobjects.thousandCounter == nextGUIRefresh:
        uiPanel.DoGraphics(screen, pygame.display, timeChange)
        nextGUIRefresh += 30
        nextGUIRefresh %= 1000

    # If the displayer is done, it's time to switch to a different displayer.
    # Sometimes the displayer doesn't know what should come after it is done.
    # If we can't figure it out, default to the Main Menu
    if parkPanel.isDone:
        log.debug("DISPLAYER IS DONE")
        # if the displayer was the quit screen, we should quit
        if isinstance(parkPanel, QuitScreen):
            break
        # clear out the old blitted image
        bgMangr.GetBgSurface(screen, screen.get_rect())
        pygame.display.flip()
        nextDisplayer = parkPanel.replacementDisplayerClass
        if isclass(nextDisplayer):
            parkPanel = nextDisplayer(bgMangr, musicMangr)
        else:
            parkPanel = MainMenu(bgMangr, musicMangr)
def addItem(self, add_item_type, add_item_id, add_count, partial_add=False):
    # Return item inst if it succeeds, else return None.
    if add_count > BAG_MAX_PILE:
        log.warn('Exp3404361 Add count > max pile. Add count :', add_count)
        defer.returnValue(None)
    gs_att_find = GSAttribute(self.__cid, self._tbl_name, 0, self._item_field_set)
    gs_att_find.updateGSAttribute(False, {'item_type': add_item_type,
                                          'item_id': add_item_id,
                                          'count': add_count})
    target_item = GSBagItem(gs_att_find)
    found_pos = bisect.bisect_left(self.__list, target_item)
    found_item = None
    log.debug('Find pos {0}'.format(found_pos))
    find_same = False
    if found_pos < 0 or found_pos >= len(self.__list):
        find_same = False
    else:
        found_item = self.__list[found_pos]
        if target_item.isSame(found_item):
            find_same = True
        else:
            find_same = False
    if find_same:
        leftmost = self.findLeftmostSameOne(found_pos)
        found_item = self.__list[leftmost]
        log.debug('Leftmost pos {0}'.format(leftmost))
        # Try to dispatch add count over existing (one or several) items!
        add_count = self.dispatchAddCount(leftmost, add_count, BAG_MAX_PILE)
        count_sum = found_item.count + add_count
        if count_sum <= BAG_MAX_PILE:
            # Fully piled; just modify the existing item's count.
            found_item.setItemCount(count_sum)
            defer.returnValue(found_item)
        else:
            if not partial_add:
                # Add fail! Partial pile not allowed!
                pass
            else:
                # Partial pile; adjust the counts.
                found_item.setItemCount(BAG_MAX_PILE)
                add_count = count_sum - BAG_MAX_PILE
    if len(self.__list) >= self.__capacity:
        log.debug('[ GSBag::addItem ] Bag is full! Cur capacity:', self.__capacity,
                  ' partial_add:', partial_add)
        defer.returnValue(None)
    log.debug('[ GSBag::addItem ] Need create item via cs. cid {0}, type {1}, id {2}, count {3}'
              .format(self.__cid, add_item_type, add_item_id, add_count))
    try:
        new_item = yield self.createItemViaCS(add_item_type, add_item_id, add_count)
    except Exception as e:
        log.exception()
        defer.returnValue(None)
    if not new_item:
        log.error('Exp39303873 create item via cs fail ! cid {0}, item {1}, {2}'.format(
            self.__cid, add_item_type, add_item_id))
        defer.returnValue(None)
    bisect.insort_left(self.__list, new_item)
    defer.returnValue(new_item)
def _trade(self, security, price=0, amount=0, volume=0, entrust_bs="buy"):
    """
    Rebalance the portfolio (adjust positions).
    :param security:
    :param price:
    :param amount:
    :param volume:
    :param entrust_bs:
    :return:
    """
    stock = self._search_stock_info(security)
    balance = self.get_balance()[0]
    if stock is None:
        raise exceptions.TradeError(u"没有查询要操作的股票信息")
    if not volume:
        volume = int(float(price) * amount)  # may need to be rounded to an integer
    if balance["current_balance"] < volume and entrust_bs == "buy":
        raise exceptions.TradeError(u"没有足够的现金进行操作")
    if stock["flag"] != 1:
        raise exceptions.TradeError(u"未上市、停牌、涨跌停、退市的股票无法操作。")
    if volume == 0:
        raise exceptions.TradeError(u"操作金额不能为零")

    # compute the rebalancing weight
    weight = volume / balance["asset_balance"] * 100
    weight = round(weight, 2)

    # fetch the existing position info
    position_list = self._get_position()

    # holdings after the adjustment
    is_have = False
    for position in position_list:
        if position["stock_id"] == stock["stock_id"]:
            is_have = True
            position["proactive"] = True
            old_weight = position["weight"]
            if entrust_bs == "buy":
                position["weight"] = weight + old_weight
            else:
                if weight > old_weight:
                    raise exceptions.TradeError(u"操作数量大于实际可卖出数量")
                else:
                    position["weight"] = old_weight - weight
            position["weight"] = round(position["weight"], 2)
    if not is_have:
        if entrust_bs == "buy":
            position_list.append({
                "code": stock["code"],
                "name": stock["name"],
                "enName": stock["enName"],
                "hasexist": stock["hasexist"],
                "flag": stock["flag"],
                "type": stock["type"],
                "current": stock["current"],
                "chg": stock["chg"],
                "percent": str(stock["percent"]),
                "stock_id": stock["stock_id"],
                "ind_id": stock["ind_id"],
                "ind_name": stock["ind_name"],
                "ind_color": stock["ind_color"],
                "textname": stock["name"],
                "segment_name": stock["ind_name"],
                "weight": round(weight, 2),
                "url": "/S/" + stock["code"],
                "proactive": True,
                "price": str(stock["current"]),
            })
        else:
            raise exceptions.TradeError(u"没有持有要卖出的股票")

    if entrust_bs == "buy":
        cash = ((balance["current_balance"] - volume) / balance["asset_balance"] * 100)
    else:
        cash = ((balance["current_balance"] + volume) / balance["asset_balance"] * 100)
    cash = round(cash, 2)
    log.debug("weight:%f, cash:%f", weight, cash)

    data = {
        "cash": cash,
        "holdings": str(json.dumps(position_list)),
        "cube_symbol": str(self.account_config["portfolio_code"]),
        "segment": 1,
        "comment": "",
    }
    try:
        resp = self.s.post(self.config["rebalance_url"], data=data)
    # pylint: disable=broad-except
    except Exception as e:
        log.warning("调仓失败: %s ", e)
        return None
    else:
        log.debug("调仓 %s%s: %d", entrust_bs, stock["name"], resp.status_code)
        resp_json = json.loads(resp.text)
        if "error_description" in resp_json and resp.status_code != 200:
            log.error("调仓错误: %s", resp_json["error_description"])
            return [{
                "error_no": resp_json["error_code"],
                "error_info": resp_json["error_description"],
            }]
        return [{
            "entrust_no": resp_json["id"],
            "init_date": self._time_strftime(resp_json["created_at"]),
            "batch_no": "委托批号",
            "report_no": "申报号",
            "seat_no": "席位编号",
            "entrust_time": self._time_strftime(resp_json["updated_at"]),
            "entrust_price": price,
            "entrust_amount": amount,
            "stock_code": security,
            "entrust_bs": "买入",
            "entrust_type": "雪球虚拟委托",
            "entrust_status": "-",
        }]
        try:
            self.assertTrue(False, "Underflow Check Timed Out")
        except AssertionError, e:
            self.failures.append(str(e))

    # Use a timer for timeout functionality
    # 20 second timeout
    timer = Timer(20, endProcess)
    try:
        timer.start()
        stdout = p.communicate()[0]
        log.debug(stdout)
        # Should fail only on the last iteration of the loop
        try:
            self.assertEqual(stdout.count("Got event code underflow message."), 1,
                             "TX underflows could not be requested async")
        except AssertionError, e:
            self.failures.append(str(e))
    finally:
        timer.cancel()

if __name__ == '__main__':
    crimson_test_suite = gr_unittest.TestSuite()
def reconnect(p, req):
    cid, sk = req
    log.debug('request reconnect. cid: {0}.'.format(cid))
    return g_UserMgr.reconnectUser(p, cid, sk)
def __q(q, args=(), one=False):
    log.debug('querying %s with %s', q, args)
    c = sql.connect(DB_FILE).execute(q, args)
    res = c.fetchall()
    c.close()
    return (res[0] if res else None) if one else res
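A usage sketch for the query helper; the table and column names are made up for illustration:

# one=True returns the first row (or None); one=False returns all rows.
row = __q("SELECT name FROM users WHERE id = ?", (42,), one=True)
rows = __q("SELECT id, name FROM users")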
def adjust_weight(self, stock_code, weight):
    """
    Xueqiu portfolio rebalancing; weight is the target position weight
    after the adjustment
    :param stock_code: str, stock code
    :param weight: float, target holding percentage after the adjustment,
        a float between 0 and 100
    """
    stock = self._search_stock_info(stock_code)
    if stock is None:
        raise exceptions.TradeError(u"没有查询要操作的股票信息")
    if stock["flag"] != 1:
        raise exceptions.TradeError(u"未上市、停牌、涨跌停、退市的股票无法操作。")

    # round the position weight to two decimal places
    weight = round(weight, 2)
    # fetch the existing position info
    position_list = self._get_position()
    # holdings after the adjustment
    for position in position_list:
        if position["stock_id"] == stock["stock_id"]:
            position["proactive"] = True
            position["weight"] = weight

    if weight != 0 and stock["stock_id"] not in [k["stock_id"] for k in position_list]:
        position_list.append({
            "code": stock["code"],
            "name": stock["name"],
            "enName": stock["enName"],
            "hasexist": stock["hasexist"],
            "flag": stock["flag"],
            "type": stock["type"],
            "current": stock["current"],
            "chg": stock["chg"],
            "percent": str(stock["percent"]),
            "stock_id": stock["stock_id"],
            "ind_id": stock["ind_id"],
            "ind_name": stock["ind_name"],
            "ind_color": stock["ind_color"],
            "textname": stock["name"],
            "segment_name": stock["ind_name"],
            "weight": weight,
            "url": "/S/" + stock["code"],
            "proactive": True,
            "price": str(stock["current"]),
        })

    remain_weight = 100 - sum(i.get("weight") for i in position_list)
    cash = round(remain_weight, 2)
    log.debug("调仓比例:%f, 剩余持仓 :%f", weight, remain_weight)
    data = {
        "cash": cash,
        "holdings": str(json.dumps(position_list)),
        "cube_symbol": str(self.account_config["portfolio_code"]),
        "segment": "true",
        "comment": "",
    }
    try:
        resp = self.s.post(self.config["rebalance_url"], data=data)
    # pylint: disable=broad-except
    except Exception as e:
        log.warning("调仓失败: %s ", e)
        return None
    log.debug("调仓 %s: 持仓比例%d", stock["name"], weight)
    resp_json = json.loads(resp.text)
    if "error_description" in resp_json and resp.status_code != 200:
        log.error("调仓错误: %s", resp_json["error_description"])
        return [{
            "error_no": resp_json["error_code"],
            "error_info": resp_json["error_description"],
        }]
    log.debug("调仓成功 %s: 持仓比例%d", stock["name"], weight)
    return None
def size_for_node(self, node, client):
    '''Given a docutils image node, returns the size the image should have
    in the PDF document, and what 'kind' of size that is.
    That involves lots of guesswork.'''
    uri = str(node.get("uri"))
    if uri.split("://")[0].lower() not in ('http', 'ftp', 'https'):
        uri = os.path.join(client.basedir, uri)
    else:
        uri, _ = urllib.urlretrieve(uri)
        client.to_unlink.append(uri)
    srcinfo = client, uri
    # Extract all the information from the URI
    imgname, extension, options = self.split_uri(uri)
    if not os.path.isfile(imgname):
        imgname = missing
    scale = float(node.get('scale', 100)) / 100
    size_known = False
    # Figuring out the size to display of an image is ... annoying.
    # If the user provides a size with a unit, it's simple: adjustUnits
    # will return it in points and we're done.
    # However, often the unit will be "%" (especially if it's meant for
    # HTML originally), in which case we will use a percentage of
    # the containing frame.

    # Find the image size in pixels:
    kind = 'direct'
    xdpi, ydpi = client.styles.def_dpi, client.styles.def_dpi
    extension = imgname.split('.')[-1].lower()
    if extension in ['svg', 'svgz'] and SVGImage.available():
        iw, ih = SVGImage(imgname, srcinfo=srcinfo).wrap(0, 0)
        # These are in pt, so convert to px
        iw = iw * xdpi / 72
        ih = ih * ydpi / 72
    elif extension == 'pdf':
        if VectorPdf is not None:
            xobj = VectorPdf.load_xobj(srcinfo)
            iw, ih = xobj.w, xobj.h
        else:
            pdf = LazyImports.pdfinfo
            if pdf is None:
                log.warning('PDF images are not supported without pyPdf or pdfrw [%s]',
                            nodeid(node))
                return 0, 0, 'direct'
            reader = pdf.PdfFileReader(open(imgname, 'rb'))
            x1, y1, x2, y2 = [float(x) for x in reader.getPage(0)['/MediaBox']]
            iw, ih = x2 - x1, y2 - y1
        # These are in pt, so convert to px
        iw = iw * xdpi / 72.0
        ih = ih * ydpi / 72.0
        size_known = True  # Assume size from original PDF is OK
    else:
        keeptrying = True
        if LazyImports.PILImage:
            try:
                img = LazyImports.PILImage.open(imgname)
                img.load()
                iw, ih = img.size
                xdpi, ydpi = img.info.get('dpi', (xdpi, ydpi))
                keeptrying = False
            except IOError:
                # PIL throws this when it's a broken/unknown image
                pass
        if keeptrying and LazyImports.PMImage:
            img = LazyImports.PMImage(imgname)
            iw = img.size().width()
            ih = img.size().height()
            density = img.density()
            # The density is in pixels per centimeter (!?)
            xdpi = density.width() * 2.54
            ydpi = density.height() * 2.54
            keeptrying = False
        if keeptrying:
            if extension not in ['jpg', 'jpeg']:
                log.error("The image (%s, %s) is broken or in an unknown format",
                          imgname, nodeid(node))
                raise ValueError
            else:
                # Can be handled by reportlab
                log.warning("Can't figure out size of the image (%s, %s). "
                            "Install PIL for better results.", imgname, nodeid(node))
                iw = 1000
                ih = 1000

    # Try to get the print resolution from the image itself via PIL.
    # If it fails, assume a DPI of 300, which is pretty much made up,
    # and then a 100% size would be iw*inch/300, so we pass
    # that as the second parameter to adjustUnits
    #
    # Some say the default DPI should be 72. That would mean
    # the largest printable image in A4 paper would be something
    # like 480x640. That would be awful.
    #
    w = node.get('width')
    h = node.get('height')
    if h is None and w is None:
        # Nothing specified: guess from iw, ih
        log.debug("Using image %s without specifying size. "
                  "Calculating based on image size at %ddpi [%s]",
                  imgname, xdpi, nodeid(node))
        w = iw * inch / xdpi
        h = ih * inch / ydpi
    elif w is not None:
        # Node specifies only w.
        # In this particular case, we want the default unit
        # to be pixels so we work like rst2html
        if w[-1] == '%':
            kind = 'percentage_of_container'
            w = int(w[:-1])
        else:
            # This uses the default DPI setting because we
            # are not using the image's "natural size";
            # this is what LaTeX does, according to the
            # docutils mailing list discussion
            w = client.styles.adjustUnits(w, client.styles.tw, default_unit='px')
        if h is None:
            # h is set from w with the right aspect ratio
            h = w * ih / iw
        else:
            h = client.styles.adjustUnits(h, ih * inch / ydpi, default_unit='px')
    elif h is not None and w is None:
        if h[-1] != '%':
            h = client.styles.adjustUnits(h, ih * inch / ydpi, default_unit='px')
            # w is set from h with the right aspect ratio
            w = h * iw / ih
        else:
            log.error('Setting height as a percentage does **not** work. '
                      'ignoring height parameter [%s]', nodeid(node))
            # Set both from image data
            w = iw * inch / xdpi
            h = ih * inch / ydpi
    # Apply scale factor
    w = w * scale
    h = h * scale
    # And now we have this probably completely bogus size!
    log.info("Image %s size calculated: %fcm by %fcm [%s]",
             imgname, w / cm, h / cm, nodeid(node))
    return w, h, kind
def logElements(elements, func, desc='Unknown'):  # pragma: no cover
    log.debug(desc)
    for element in elements:
        typeStr = type(element)
        func(str(typeStr) + ':' + str(element))
def connect(self):
    """
    Runs the connect loop for the IRC object. This is usually called by
    __init__ in a separate thread to allow multiple concurrent connections.
    """
    while True:
        self.initVars()
        ip = self.serverdata["ip"]
        port = self.serverdata["port"]
        checks_ok = True
        try:
            # Set the socket type (IPv6 or IPv4).
            stype = socket.AF_INET6 if self.serverdata.get("ipv6") else socket.AF_INET
            # Create the socket.
            self.socket = socket.socket(stype)
            self.socket.setblocking(0)
            # Set the connection timeouts. Initial connection timeout is a
            # lot smaller than the timeout after we've connected; this is
            # intentional.
            self.socket.settimeout(self.pingfreq)
            # Enable SSL if set to do so. This requires a valid keyfile and
            # certfile to be present.
            self.ssl = self.serverdata.get('ssl')
            if self.ssl:
                log.info('(%s) Attempting SSL for this connection...', self.name)
                certfile = self.serverdata.get('ssl_certfile')
                keyfile = self.serverdata.get('ssl_keyfile')
                if certfile and keyfile:
                    try:
                        self.socket = ssl.wrap_socket(self.socket,
                                                      certfile=certfile,
                                                      keyfile=keyfile)
                    except OSError:
                        log.exception('(%s) Caught OSError trying to '
                                      'initialize the SSL connection; '
                                      'are "ssl_certfile" and '
                                      '"ssl_keyfile" set correctly?',
                                      self.name)
                        checks_ok = False
                else:
                    # SSL was misconfigured, abort.
                    log.error('(%s) SSL certfile/keyfile was not set '
                              'correctly, aborting... ', self.name)
                    checks_ok = False
            log.info("Connecting to network %r on %s:%s", self.name, ip, port)
            self.socket.connect((ip, port))
            self.socket.settimeout(self.pingtimeout)
            # If SSL was enabled, optionally verify the certificate
            # fingerprint for some added security. I don't bother to check
            # the entire certificate for validity, since most IRC networks
            # self-sign their certificates anyways.
            if self.ssl and checks_ok:
                peercert = self.socket.getpeercert(binary_form=True)
                sha1fp = hashlib.sha1(peercert).hexdigest()
                expected_fp = self.serverdata.get('ssl_fingerprint')
                if expected_fp:
                    if sha1fp != expected_fp:
                        # SSL fingerprint doesn't match; break.
                        log.error('(%s) Uplink\'s SSL certificate '
                                  'fingerprint (SHA1) does not match the '
                                  'one configured: expected %r, got %r; '
                                  'disconnecting...', self.name,
                                  expected_fp, sha1fp)
                        checks_ok = False
                    else:
                        log.info('(%s) Uplink SSL certificate fingerprint '
                                 '(SHA1) verified: %r', self.name, sha1fp)
                else:
                    log.info('(%s) Uplink\'s SSL certificate fingerprint '
                             'is %r. You can enhance the security of your '
                             'link by specifying this in a "ssl_fingerprint"'
                             ' option in your server block.', self.name, sha1fp)
            if checks_ok:
                # All our checks passed; get the protocol module to connect
                # and run the listen loop.
                self.proto.connect()
                self.spawnMain()
                log.info('(%s) Starting ping schedulers....', self.name)
                self.schedulePing()
                log.info('(%s) Server ready; listening for data.', self.name)
                self.run()
            else:
                # Configuration error :(
                log.error('(%s) A configuration error was encountered '
                          'trying to set up this connection. Please check'
                          ' your configuration file and try again.', self.name)
        except (socket.error, ProtocolError, ConnectionError) as e:
            # self.run() or the protocol module it called raised an
            # exception, meaning we've disconnected!
            log.warning('(%s) Disconnected from IRC: %s: %s',
                        self.name, type(e).__name__, str(e))
        # The run() loop above was broken, meaning we've disconnected.
        self._disconnect()
        # If autoconnect is enabled, loop back to the start. Otherwise,
        # return and stop.
        autoconnect = self.serverdata.get('autoconnect')
        log.debug('(%s) Autoconnect delay set to %s seconds.', self.name, autoconnect)
        if autoconnect is not None and autoconnect >= 1:
            log.info('(%s) Going to auto-reconnect in %s seconds.', self.name, autoconnect)
            time.sleep(autoconnect)
        else:
            log.info('(%s) Stopping connect loop (autoconnect value %r is < 1).',
                     self.name, autoconnect)
            return
def _load_webpack_config() -> WebpackConfig:
    log.debug(f'Getting config from webpack config using {GET_CONFIG_SCRIPT_PATH}.')
    proc = run(['node', GET_CONFIG_SCRIPT_PATH], stdout=PIPE)
    log.debug(f'Output: {proc.stdout}.')
    config = loads(proc.stdout)

    dev_server_host = config.get('dev_server_host', None)
    assert isinstance(dev_server_host, str), \
        f'dev_server_host must be a string, got {dev_server_host}.'
    assert is_valid_host(dev_server_host), \
        f'dev_server_host must be a valid IP or hostname, got {dev_server_host}.'
    if dev_server_host == '0.0.0.0':
        dev_server_host = '127.0.0.1'
    log.debug(f'Local dev server will use host {dev_server_host}.')

    dev_server_port = config.get('dev_server_port', None)
    if isinstance(dev_server_port, str):
        try:
            dev_server_port = int(dev_server_port, 10)
        except ValueError:
            pass
    assert isinstance(dev_server_port, int), \
        f'dev_server_port must be a number, got {dev_server_port}.'
    assert is_valid_port(dev_server_port), \
        f'dev_server_port must be a valid port, got {dev_server_port}.'
    log.debug(f'Local dev server will use port {dev_server_port}.')

    build_dir = config.get('build_dir', None)
    assert isinstance(build_dir, str), f'build_dir must be a string, got {build_dir}.'
    build_dir = Path(build_dir)
    log.debug(f'Local builds will be in {build_dir}.')

    upstream_url = config.get('upstream_url', None)
    assert isinstance(upstream_url, str), f'upstream_url must be a string, got {upstream_url}.'
    try:
        parse_url(upstream_url)
    except ValueError as e:
        assert False, f'upstream_url must be a valid URL, got {upstream_url} ({e}).'
    log.debug(f'Upstream builds will be at {upstream_url}.')

    return WebpackConfig(dev_server_host, dev_server_port, build_dir, upstream_url)
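The Node helper at GET_CONFIG_SCRIPT_PATH is expected to print JSON with the four keys read above; an illustrative shape (only the key names come from the code, the values are assumptions):

# Example of the JSON _load_webpack_config() consumes.
EXAMPLE_CONFIG = '''
{
  "dev_server_host": "0.0.0.0",
  "dev_server_port": "8080",
  "build_dir": "dist",
  "upstream_url": "https://builds.example.com/latest/"
}
'''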
def send(self, data):
    self.messages.append(data)
    log.debug('-> ' + data)
def create_slack_channel(self, incident):
    # Create channel
    log.info("Creating Slack channel " + incident.slack_channel + "...")
    try:
        channel = self.slack_fake_user.channels.create(
            name=incident.slack_channel).body['channel']
        log.debug("... created Slack channel with ID " + channel['id'])
    except SlackerError as e:
        if str(e) == "name_taken":
            log.debug("... channel already exists, searching the existing one")
            channels_list = self.slack.channels.list().body['channels']
            channel = [chan for chan in channels_list
                       if chan['name'] == incident.slack_channel]
            if len(channel) != 1:
                raise Exception("Failed to lookup channel that should exist")
            channel = channel[0]
            log.debug("... existing channel found, continuing using channel " + channel['id'])
        else:
            raise e
    incident.slack_channel_id = channel['id']

    # Join channel
    log.debug("Fake user joining channel ...")
    self.slack_fake_user.channels.join(name=channel['name'])
    log.debug("... joined channel")

    # Invite Dialogflow user
    log.debug("Inviting Dialogflow user to incident channel ...")
    self.invite_user_to_channel(
        user=self.apiai_user['name'],
        user_id=self.apiai_user['id'],
        channel=incident.slack_channel,
        channel_id=incident.slack_channel_id)
    log.debug("Invited user")

    # Invite App user
    log.debug("Inviting self (app) user to incident channel ...")
    self.invite_user_to_channel(
        user=self.slack_self_user['name'],
        user_id=self.slack_self_user['id'],
        channel=incident.slack_channel,
        channel_id=incident.slack_channel_id)
    log.debug("Invited user")

    # Define purpose and title
    log.debug("... defining channel purpose")
    self.slack.channels.set_purpose(
        channel=incident.slack_channel_id,
        purpose="Incident " + incident.priority.value.upper() + " " +
                str(incident.id) + " - Incident management room")
    log.debug("... defined channel purpose")
    log.debug("... defining channel title")
    self.slack.channels.set_topic(
        channel=incident.slack_channel_id,
        topic=incident.title)
    log.debug("... defined channel title")
bat.vbat = vbat.val
tk.Label(settings_page, bat.vbat, decoration='Battery: {:.2} Volt').grid(columnspan=3)
tk.Label(settings_page, 'Hintergrundbeleuchtung').grid(columnspan=3)
bgled = tk.Var(100)
tk.Slider(settings_page, bgled, min=1,
          command=lambda x: root.backlight(x.val)).grid(columnspan=2, row=2)
tk.Label(settings_page, bgled, decoration='{}%').grid(column=2, row=2)

foto_page = top_menue.add_page(title='Fotos', title_fg=tk.WHITE, title_bg=tk.RED)
tk.Label(foto_page, 'Fotos').grid()

for r in rooms:
    page = licht_menue.add_page(title=r[0], side=0, title_fg=tk.BLACK, title_bg=tk.YELLOW)
    for row, id in enumerate(r[1]):
        log.debug('add new light {} in room {}'.format(id, r[0]))
        # print('deconz/lights/{}/state'.format(id))
        topic = 'controller/lights/{}/state'.format(id)
        # lf = tk.Frame(side=1)
        # page.pack(lf)
        lights[id] = (tk.Var(0, t=bool), tk.Var(0, t=int), topic)
        tk.Label(page, id, decoration='L{}: ').grid(row=row, column=0)
        tk.Slider(page, lights[id][1], lights[id][0],
                  command=lambda x, topic=topic: mqtt.publish(topic, str(x.val)),
                  select_command=lambda x, topic=topic: mqtt.publish(topic, 'on' if x.val else 'off')
                  ).grid(row=row, column=1, columnspan=4)
        tk.Label(page, lights[id][1], decoration='{}%').grid(row=row, column=5)

rfid = RFID(rx=15, tx=2, freq=1,
            new_tag_cmd=lambda x, topic='audio/cmd/play': mqtt.publish(topic, str(x)),
            tag_removed_cmd=lambda x, topic='audio/cmd/stop': mqtt.publish(topic, str(x)))
def SetVelocityRight(self):
    log.debug('set vel right -- ' + str(self.facing))
    if self.facing == FACINGLEFT:
        self.turning = 'right'
    self.walking = 1
    Movable.SetVelocityRight(self)
def NotifyOutOfBounds(self, bounds):
    if not self.dying:
        log.debug("MawJumper WENT OFF THE MAP")
        self.Die()
def GET(self):
    ''' Method: get '''
    try:
        log.info("CI:Get")
        query = HTTPAdaptor.format_query(web.ctx.env["QUERY_STRING"])
        if not (query.has_key("author") and query.has_key("commit_id") and query.has_key("token")):
            raise InvalidQueryStringError
        author = query.get("author")
        commit_id = query.get("commit_id")
        token = query.get("token")
        # if not utils.validate_token(author=author, commit_id=commit_id,
        #                             url=config.server["cvproxy"], token=token):
        #     raise AuthenticationError
        sopt = SQLAdaptor()
        request_num = sopt.GetUnprocessedRequest(author)
        request_url = "/".join([config.server["cvproxy"], "myunprocessedrequests"])
        author_info = {
            "name": author,
            "request_url": request_url,
            "request_num": request_num
        }
        dashboard_url = "/".join([config.server["cvproxy"], "dashboard"])
        navi = [
            # {"name": "Dashboard",
            #  "url": dashboard_url,
            #  "theme": "unselected"
            # },
            {
                "name": "Current review request",
                "url": "#",
                "theme": "selected"
            }
        ]
        url = {
            "ignore": {
                "absolute": "/".join([
                    config.server["cvproxy"],
                    "submitaction?action=ignore&commit_id=%s" % commit_id
                ]),
                "relative": "submitaction?action=Ignore&commit_id=%s" % commit_id
            },
            "revoke": {
                "absolute": "/".join([
                    config.server["cvproxy"],
                    "submitaction?action=revoke&commit_id=%s" % commit_id
                ]),
                "relative": "submitaction?action=Revoke&commit_id=%s" % commit_id
            }
        }
        cijob = sopt.GetJobNameByCommit(commit_id)
        files_committed = sopt.GetCommitedFilesNum(commit_id)
        downstreamjobs = sopt.GetDownstreamJobs(cijob)
        if downstreamjobs:
            project_info = sopt.GetProjectInfo(downstreamjobs[0])
            test_info = sopt.GetTestSummary(downstreamjobs)
            results, failed_jobs = sopt.GetTestResult(downstreamjobs)
        else:
            project_info = sopt.GetProjectInfo(cijob)
            test_info = sopt.GetTestSummary([cijob])
            results, failed_jobs = sopt.GetTestResult([cijob])
        log.debug("downstream jobs: %s" % downstreamjobs)
        log.debug("project_info: %s" % project_info)
        log.debug("test_info: %s" % test_info)
        info = {
            "product": project_info["product"],
            "platform": project_info["platform"],
            "version": project_info["version"],
            "files_committed": files_committed,
            "tests_executed": test_info["tests"],
            "failures": test_info["failures"]
        }
        ci_url = config.ciserver["urlprefix"]
        jobs = []
        jobs.append({
            "jobpage": "/".join([ci_url, cijob[0]]),
            "name": cijob[0],
            "overview": "/".join([ci_url, cijob[0], cijob[1]]),
            "console": "/".join([ci_url, cijob[0], cijob[1], "console"]),
            "workspace": "/".join([ci_url, cijob[0], "ws"]),
            "testresult": "/".join([ci_url, cijob[0], cijob[1], "TestReport/?"]),
            "logs": "/".join([ci_url, cijob[0], cijob[1], "artifact/logs"]),
            "results": results,
            "failed_jobs": opts.FormatFailedJobs(failed_jobs)
        })
        categories = sopt.GetCategory()
        diff_list = sopt.GetDiffSetByCommit(commit_id)
        codes = opts.FormatDiffs(diff_list)
        return render.render_template('current_review_request.tmpl',
                                      author=author_info,
                                      navi=navi,
                                      url=url,
                                      commit_id=commit_id,
                                      info=info,
                                      categories=categories,
                                      jobs=jobs,
                                      codes=codes)
    except Exception, e:
        log.error(e)
        return HTTPAdaptor.format_response("error", "Request processing failed.")
def DecideAnimation(self):
    # States are checked in priority order:
    # turn > jump > peak > land > fall > walk > rest.
    if self.turning != 0 and self.animation != self.turnAnim:
        log.debug('setting turn')
        self.turnAnim.Reset()
        self.SetAnimation(self.turnAnim)
    elif self.jumping and self.animation != self.jumpAnim \
            and not self.peaking \
            and not self.turning:
        log.debug('setting jump')
        self.jumpAnim.Reset()
        self.SetAnimation(self.jumpAnim)
    elif self.jumping and self.peaking \
            and self.animation != self.peakAnim \
            and not self.turning:
        log.debug('setting peak')
        self.peakAnim.Reset()
        self.SetAnimation(self.peakAnim)
    elif self.landing and self.animation == self.fallAnim \
            and not self.turning:
        log.debug('setting land')
        self.landAnim.Reset()
        self.SetAnimation(self.landAnim)
        #mark here
        #self.landing = 0
    elif self.startedFalling and self.animation != self.fallAnim \
            and not self.turning:
        log.debug('setting fall')
        self.fallAnim.Reset()
        self.SetAnimation(self.fallAnim)
        self.startedFalling = 0
    elif self.walking and self.animation != self.walkAnim \
            and self.isOnSomeGround \
            and not (self.turning or self.jumping or self.startedFalling):
        log.debug('setting walk')
        self.SetAnimation(self.walkAnim)
    elif self.animation != self.restAnim \
            and self.isOnSomeGround \
            and not (self.turning or self.jumping or self.startedFalling
                     or self.walking or self.landing):
        log.debug('setting rest')
        self.SetAnimation(self.restAnim)
def removePortGroup(self, portGroupName):
    log.debug('Removing Portgroup %s from Virtual Switch %s'
              % (portGroupName, self))
    self.__vswitch.RemovePortGroup(portGroupName)
def remove(self):
    log.debug('Removing VmKernel NIC %s' % self)
    _vmkernelNicInfo.RemoveVmKernelNic(self.name)
def SetVelocityLeft(self):
    log.debug('set vel left -- ' + str(self.facing))
    if self.facing == FACINGRIGHT:
        self.turning = 'left'
    self.walking = 1
    Movable.SetVelocityLeft(self)
def cosConnectForInstaller(failOnWarnings=True, onlyConfiguredNics=True):
    '''Like cosConnect, but it uses userchoices and, if you use this function
    exclusively, it will keep only one connected Vnic at a time.
    Tries to make a connection in the following order, stopping after the
    first successful connection is made:
    1. try to use the config in userchoices.*DownloadNic and *DownloadNetwork
    2. try to use the config in userchoices.*CosNic and *CosNetwork
    3. try to use DHCP connections on any remaining NICs
    Arguments:
    failOnWarnings: if True, raise an exception on otherwise survivable
                    warnings
    onlyConfiguredNics: if True, don't attempt any nics that haven't been
                        configured by the user, i.e. don't try #3 above
    '''
    log.info('Attempting to bring up the network.')

    def doRaise(msg):
        raise Exception(msg)

    if failOnWarnings:
        logOrRaise = doRaise
    else:
        logOrRaise = log.warn

    # ensure we're only manipulating one COS nic
    disconnectDownloadNetwork()
    global _connectedVNic
    if _connectedVNic:
        log.info('Brought down the already-enabled Virtual NIC.')
        _connectedVNic = None

    argsForCosConnect = []
    allNicChoices = []
    downloadNicChoices = userchoices.getDownloadNic()
    if downloadNicChoices:
        log.info('The user chose specific network settings for downloading '
                 'remote media. Those choices will be attempted first.')
        if not downloadNicChoices['device']:
            availableNIC = getPluggedInAvailableNIC(None)
            if availableNIC:
                downloadNicChoices.update(device=availableNIC)
                allNicChoices.append(downloadNicChoices)
            else:
                logOrRaise('Could not find a free Physical NIC to attach to'
                           ' download network specifications %s'
                           % str(downloadNicChoices))
        else:
            allNicChoices.append(downloadNicChoices)

    cosNicChoices = userchoices.getCosNICs()
    if cosNicChoices:
        allNicChoices += cosNicChoices
    else:
        msg = 'No COS NICs have been added by the user.'
        logOrRaise(msg)

    for nicChoices in allNicChoices:
        log.debug('nicChoices %s' % str(nicChoices))
        log.debug('Setting vlan (%(vlanID)s), ipConf (%(bootProto)s, %(ip)s, '
                  '%(netmask)s) for NIC %(device)s' % nicChoices)
        assert nicChoices['device']
        nic = nicChoices['device']  # a reference to a PhysicalNicFacade
        vlanID = nicChoices['vlanID']
        if not vlanID:
            vlanID = None  # make sure it's None, not just ''
        if nicChoices['bootProto'] == userchoices.NIC_BOOT_STATIC:
            if not nicChoices['ip'] or not nicChoices['netmask']:
                msg = ('COS NIC %s is not fully defined. Missing '
                       'IP address or netmask' % str(nic))
                logOrRaise(msg)
            ipConf = StaticIPConfig(nicChoices['ip'], nicChoices['netmask'])
        elif nicChoices['bootProto'] == userchoices.NIC_BOOT_DHCP:
            ipConf = DHCPIPConfig()
        else:
            msg = 'Unknown bootProto specified for %s' % nic
            logOrRaise(msg)
            ipConf = DHCPIPConfig()
        argsForCosConnect.append((nic, vlanID, ipConf))

    if not onlyConfiguredNics:
        # we've tried all the user-configured nics, now try the rest with DHCP
        configuredNics = [choices['device'] for choices in allNicChoices]
        unConfiguredNics = set(getPhysicalNics()) - set(configuredNics)
        # sort these for repeatability's sake.
        unConfiguredNics = list(unConfiguredNics)
        unConfiguredNics.sort()
        for nic in unConfiguredNics:
            if not nic.isLinkUp:
                continue  # it would be pointless to try unplugged NICs
            log.info('Setting unconfigured NIC %s to use DHCP' % nic)
            ipConf = DHCPIPConfig()
            argsForCosConnect.append((nic, None, ipConf))

    for nic, vlanID, ipConf in argsForCosConnect:
        try:
            log.info('Bringing up network interface for NIC %s. '
                     'Using ipConf %s' % (nic, ipConf))
            vnic = cosConnect(pNic=nic, vlanID=vlanID, ipConf=ipConf)
        except vmkctl.HostCtlException, ex:
            msg = 'vmkctl HostCtlException: ' + ex.GetMessage()
            logOrRaise(msg)
        else:
            log.info('COS has an enabled Virtual NIC %s.' % vnic)
            _connectedVNic = vnic
            break  # we only need one to work
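# --- Illustrative call sequence (not from the original source) ---
# Based on the docstring above: first try only the user-configured NICs with
# warnings fatal, then fall back to probing every remaining NIC via DHCP.
# Catching bare Exception matches what doRaise() raises; anything narrower
# would be an assumption.
try:
    cosConnectForInstaller(failOnWarnings=True, onlyConfiguredNics=True)
except Exception:
    log.warn('Configured NICs failed; retrying with DHCP on all NICs.')
    cosConnectForInstaller(failOnWarnings=False, onlyConfiguredNics=False)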
        sys.exit(ExitCodes.WAIT_THEN_REBOOT)

    # replace the default exception hook with a friendlier one. This
    # is just for parsing the boot cmdline, as GUI, Text, and Scripted
    # install methods will replace the sys.excepthook with their own
    sys.excepthook = lambda type, value, tb: \
        exception.handleException(None, (type, value, tb),
                                  traceInDetails=False)

    # To make testing easier, pull from the environment first.
    # It's either in $BOOT_CMDLINE or the /proc/cmdline file
    cmdlineFile = open('/proc/cmdline', 'r')
    bootCmdLine = os.environ.get('BOOT_CMDLINE', cmdlineFile.read())
    cmdlineFile.close()  # close promptly rather than waiting for GC
    opts.extend(translateBootCmdLine(bootCmdLine))
    log.debug('command line options: %s' % str(opts))

    for opt, arg in opts:
        if opt == '-t' or opt == '--text':
            userchoices.setRunMode(userchoices.RUNMODE_TEXT)
        elif opt == '-d' or opt == '--debug':
            userchoices.setDebug(True)
        elif opt == '--nox':
            userchoices.setStartX(False)
        elif opt == '-s' or opt == '--script':
            userchoices.setRunMode(userchoices.RUNMODE_SCRIPTED)
            userchoices.setRootScriptLocation(arg)
        elif opt == '--url':
            userchoices.setMediaLocation(arg)
        elif opt == '--debugpatch':
            userchoices.setDebugPatchLocation(arg)
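# --- Illustrative sketch (not from the original source) ---
# translateBootCmdLine is called above but not defined in this file. A
# minimal version might map kernel command-line tokens to the getopt-style
# (opt, arg) pairs the loop above consumes. The 'text' and 'debug' token
# names mirror the long options in that loop; 'url=' and 'ks=' are
# hypothetical spellings for the tokens carrying arguments.
def translateBootCmdLine(bootCmdLine):
    opts = []
    for token in bootCmdLine.split():
        if token == 'text':
            opts.append(('--text', ''))
        elif token == 'debug':
            opts.append(('--debug', ''))
        elif token.startswith('url='):
            opts.append(('--url', token.split('=', 1)[1]))
        elif token.startswith('ks='):  # hypothetical scripted-install token
            opts.append(('--script', token.split('=', 1)[1]))
    return opts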
def remove(self):
    log.debug('Removing Virtual NIC %s' % str(self))
    _consoleNicInfo.RemoveServiceConsoleNic(self.name)
def remove(self):
    log.debug('Removing Virtual Switch %s' % str(self))
    self._clearUplinks()
    for portGroup in self.portGroups:
        self.removePortGroup(portGroup.name)
    _vswitchInfo.RemoveVirtualSwitch(self.name)
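# --- Illustrative teardown (not from the original source) ---
# remove() above enforces the safe ordering itself: uplinks are cleared and
# every port group removed before the vswitch is deleted, so a caller only
# needs something like this. getVirtualSwitches() is a hypothetical accessor,
# named by analogy with getPhysicalNics() used earlier.
for vswitch in getVirtualSwitches():
    vswitch.remove()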