def fromPath(path):
    # Determine how to read the layer
    if path.endswith(".arg"):
        path = path[:-4] + ".json"
        if not os.path.exists(path):
            log.error("Could not find arg metadata file at " + path)
    if path.endswith(".json"):
        # Read metadata
        jf = open(path)
        meta = json.load(jf)
        jf.close()
        layer_type = meta["type"]
        if "path" in meta:
            data_path = meta["path"]
        else:
            data_path = path[:-5] + ".arg"
        if layer_type == "geotiff":
            if not data_path:
                data_path = "%s.tif" % path[:-5]
            return GdalLayer(meta, data_path)
        # elif layer_type == "tiled":
        #     if not data_path:
        #         data_path = path[:-5]
        #     return TiledLayer(meta, data_path)
        # elif layer_type == "constant":
        #     return ConstantLayer(meta, data_path)
        elif layer_type == "arg":
            return ArgLayer(meta, path, data_path)
        else:
            log.error("Layer type %s in file %s is not supported" % (layer_type, path))
    else:
        return GdalLayer(path)
def onHumanChanged(self, event):
    if event.change == 'proxyChange':
        if event.proxy != 'clothes':
            return
        proxy = event.proxy_obj
        if not proxy:
            return
        foot_pose = proxy.special_pose.get("foot", None)
        if not foot_pose:
            return
        filename = getpath.thoroughFindFile(foot_pose, self.paths)
        if not os.path.isfile(filename):
            log.error("Could not find a file for special_pose foot %s, file does not exist.", filename)
            return
        if event.action == 'add':
            self.loadFootPose(filename)
        elif event.action == 'remove':
            if self.selectedFile and getpath.isSamePath(filename, self.selectedFile):
                self.loadFootPose(None)
    if event.change == 'poseChange' and not self._setting_pose:
        if self.selectedPose:
            self.applyFootPose(self.selectedPose)
    if event.change == 'reset':
        # Update GUI after reset (if tab is currently visible)
        self._setting_pose = False
        self.loadFootPose(None)
def execute(cmd, root_helper=None, process_input=None, addl_env=None,
            check_exit_code=True, return_stderr=False, log_fail_as_error=True,
            extra_ok_codes=None):
    try:
        obj, cmd = create_process(cmd, root_helper=root_helper,
                                  addl_env=addl_env)
        _stdout, _stderr = (process_input and
                            obj.communicate(process_input) or
                            obj.communicate())
        obj.stdin.close()
        m = ("\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r" %
             (cmd, obj.returncode, _stdout, _stderr))
        extra_ok_codes = extra_ok_codes or []
        if obj.returncode and obj.returncode in extra_ok_codes:
            obj.returncode = None
        if obj.returncode and log_fail_as_error:
            LOG.error(m)
        else:
            LOG.debug(m)
        if obj.returncode and check_exit_code:
            raise RuntimeError(m)
    finally:
        # NOTE(termie): this appears to be necessary to let the subprocess
        #               call clean something up in between calls, without
        #               it two execute calls in a row hangs the second one
        greenthread.sleep(0)
    return return_stderr and (_stdout, _stderr) or _stdout
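# Usage sketch for execute(): assumes the create_process() helper above and an
# eventlet greenthread environment; the command shown is illustrative only.
#
#     stdout = execute(['ip', 'link', 'show'])
#     stdout, stderr = execute(['ip', 'link', 'show'], return_stderr=True,
#                              check_exit_code=False)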
def __init__(self, obj, name):
    """
    This method initializes an instance of the Target class.

    Parameters
    ----------
    obj:
        *3d object*. The base object (to which a target can be applied).
        This object is read to determine the number of vertices to use when
        initializing this data structure.
    name:
        *string*. The name of this target.
    """
    self.name = name
    self.morphFactor = -1
    try:
        self._load(self.name)
    except:
        self.verts = []
        log.error('Unable to open %s', name)
        return
    self.faces = obj.getFacesForVertices(self.verts)
def get_server_warnings(warn_id):
    conn = getDBConn()
    warning = {}
    try:
        cursor = conn.cursor()
        cursor.callproc('sp_get_server_warnings_detail', (warn_id,))
        result = cursor.fetchone()
        print(result)
        if result is None:
            return make_response('Resource not Found', 404)
        warning['id'] = result['id']
        warning['tenant_id'] = result.get('tenant_id', '')
        warning['strategy_id'] = result.get('strategy_id', '')
        warning['name'] = result.get('name', '')
        warning['type'] = result.get('type', '')
        warning['description'] = result.get('description', '')
        warning['level'] = result.get('level', '')
        warning['server_id'] = result.get('server_id', '')
        warning['created'] = str(result.get('created', ''))
        warning['status'] = result.get('status', '')
        cursor.close()
    except Exception as exc:
        log.error(exc)
        return make_response(json.dumps(exc.message), 500)
    finally:
        conn.close()
    return make_response(json.dumps({'warnings': warning}), 200)
def cleanData(gpio, x):
    global past
    old = None
    if gpio in past:
        old = past[gpio][0]
        old.extendleft(x)
    else:
        old = deque(x, 100)
    stdd = stddev(list(old))
    med = median(x)
    n = list()
    for i in x:
        if abs(i - med) < stdd:
            n.append(i)
    if len(n) > 0:
        val = average(n)
        past[gpio] = (old, val, time.time())
    else:
        log.error("No valid temp data for gpio(%d) orig(%s) cleaned(%s)" % (gpio, str(x), str(n)))
    return past[gpio][1]
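# cleanData() relies on stddev/median/average helpers that are not shown here;
# a minimal stand-in on Python 3 (an assumption, not the original helpers):
#
#     from statistics import pstdev as stddev, median, mean as average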
def run(self):
    """Thread main function.

    Runs in a loop, accepting connections from new clients.
    """
    log.info('GPS server thread: start, port: %d' % self.gps_port)
    try:
        server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        server.bind(('0.0.0.0', self.gps_port))
        server.listen(1)
        server.settimeout(3)  # timeout: 3s
        while self.running:
            try:
                conn, address = server.accept()
                conn.settimeout(3)
                self.got_client_cb(conn, address)
                log.debug('new client from: %s' % str(address))
            except socket.timeout:
                pass
        server.close()
        log.info('GPS server thread: bye')
    except Exception as e:
        log.error('GPS server thread error: %s' % e)
        self.running = False
def write(self, values):
    """
    Writes values to the buffer.
    values is a list-like structure with the data to write
    """
    endian = '>'
    outputfmt = "%s%d%s" % (endian, len(values), self.sfmt)
    try:
        self.buf.write(struct.pack(outputfmt, *values))
    except Exception as e:
        if self.verify:
            print('Verifying data...')
            for v in values:
                # TODO: Handle bit types specially
                # Pack and unpack, see if we get the same value
                failed = False
                nv = None
                try:
                    nv = struct.unpack(endian + self.sfmt,
                                       struct.pack(endian + self.sfmt, v))[0]
                except struct.error as e:
                    print(e)
                    failed = True
                # Note, if both nv and v are nan, the check will fail:
                # nan != nan, but in our case we want that to count as equal,
                # so we explicitly check for it here
                if failed or \
                   (nv != v and not (math.isnan(nv) and math.isnan(v))):
                    log.error('Verification failed. Trying to '
                              'convert to %s resulted in: %s -> %s' %
                              (self.datatype, v, nv))
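# A standalone sketch of the pack/unpack round-trip check used in write(),
# assuming the 'f' (float32) format character: it shows the precision loss
# that makes nv != v, and the NaN case the comment above warns about.
#
#     import math
#     import struct
#     v = 0.1
#     nv = struct.unpack('>f', struct.pack('>f', v))[0]
#     print(v, '->', nv)           # 0.1 -> 0.10000000149011612
#     nan_rt = struct.unpack('>f', struct.pack('>f', float('nan')))[0]
#     print(math.isnan(nan_rt))    # True, although nan_rt != float('nan')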
def get_device_warnings_list():
    requestParam = {}
    requestParam['order_by'] = request.args.get('order_by', 'created_time')
    requestParam['desc'] = request.args.get('desc', 'false')
    requestParam['start_time'] = request.args.get('start_time', None)
    requestParam['end_time'] = request.args.get('end_time', None)
    conn = getDBConn()
    try:
        cursor = conn.cursor()
        cursor.callproc('sp_get_device_warnings',
                        (requestParam['start_time'], requestParam['end_time']))
        resultSet = cursor.fetchall()
        warnings = []
        for result in resultSet:
            warning = {}
            warning['id'] = result.get('id', '')
            warning['strategy_id'] = result.get('strategy_id', '')
            warning['name'] = result.get('name', '')
            warning['type'] = result.get('type', '')
            warning['description'] = result.get('description', '')
            warning['level'] = result.get('level', '')
            warning['object_id'] = result.get('object_id', '')
            warning['object_type'] = result.get('object_type', '')
            warning['created'] = str(result.get('created', ''))
            warning['status'] = result.get('status', '')
            warnings.append(warning)
        cursor.close()
        if len(warnings) > 0:
            warnings = sorted(warnings, key=lambda k: k['created'],
                              reverse=(requestParam['desc'] == 'true'))
    except Exception as exc:
        log.error(exc)
        return make_response(json.dumps(exc.message), 500)
    finally:
        conn.close()
    return make_response(json.dumps({'warnings': warnings}), 200)
def getShader(path, defines=[], cache=None):
    shader = None
    cache = cache or _shaderCache
    path1 = path + '_vertex_shader.txt'
    path2 = path + '_fragment_shader.txt'
    path3 = path + '_geometry_shader.txt'
    paths = [p for p in [path1, path2, path3] if os.path.isfile(p)]
    if not paths:
        log.error('No shader file found at path %s. Shader not loaded.', path)
        cache[path] = False
        return False
    mtime = max(os.path.getmtime(p) for p in paths)
    cacheName = path
    if defines:
        # It's important that the defines are sorted alphabetically here
        cacheName = cacheName + "@" + "|".join(sorted(defines))
    if cacheName in cache:
        shader = cache[cacheName]
        if shader is False:
            return shader
        if mtime > shader.modified:
            log.message('reloading %s', cacheName)
            try:
                shader.initShader()
                shader.modified = mtime
            except RuntimeError:
                log.error("Error loading shader %s", cacheName, exc_info=True)
                shader = False
def walktree(top, callback, param):
    if not os.access(top, os.F_OK):
        return 0
    if "igndir" in param and top.find(param["igndir"]) >= 0:
        return 0
    filecount = 0
    for f in os.listdir(top):
        if f.find(".svn") != -1:
            continue
        pathname = os.path.join(top, f)
        try:
            st = os.stat(pathname)
            if not st:
                continue
            mode = st.st_mode
            if S_ISDIR(mode):
                filecount += walktree(pathname, callback, param)
            elif S_ISREG(mode):
                try:
                    callback(pathname, param)
                except Exception as e:
                    log.error(e)
                if os.access(pathname, os.F_OK):
                    filecount += 1
        except:
            continue
    return filecount
def _read_vpc_info():
    if not os.path.exists(_aws_vpc_info):
        logger.error("read %s : No such file." % _aws_vpc_info)
        return None
    with open(_aws_vpc_info, 'r+') as fd:
        tmp = fd.read()
    return json.loads(tmp)
def start(self, host, port, debug=False):
    """
    Start REST server

    Parameters:
        host: address to bind to
        port: port to bind to
        debug(bool): Debug flag

    Returns:
        0 on success
    """
    for ssl_ctx_file in self.context:
        if not os.path.isfile(ssl_ctx_file):
            log.error("SSL cert or key file missing.")
            return -1
    self.process = multiprocessing.Process(target=self.app.run,
                                           kwargs={'host': host,
                                                   'port': port,
                                                   'ssl_context': self.context,
                                                   'debug': debug,
                                                   'use_reloader': False,
                                                   'processes': 1})
    self.process.start()
    return 0
def load_config(configfile=None):
    global _cached_config
    if _cached_config:
        return _cached_config
    config = ConfigParser.SafeConfigParser()
    # First load the default config
    try:
        config.readfp(open(os.path.join(os.path.dirname(__file__),
                                        "..", "data", "defaults")))
        log.debug("Loaded default configuration")
    except IOError:
        log.error("Unable to load default configuration. The program"
                  " may not work correctly.")
    # Now load the system/user specific config (if any)
    if configfile:
        loaded = config.read([configfile])
    else:
        loaded = config.read(['/etc/circusrc',
                              os.path.expanduser('~/.circusrc')])
    log.debug("Loaded config files: %s" % ', '.join(loaded))
    _cached_config = config
    return config
def _read_install_conf():
    if not os.path.exists(_aws_proxy_install_conf):
        logger.error("read %s : No such file." % _aws_proxy_install_conf)
        return None
    with open(_aws_proxy_install_conf, 'r+') as fd:
        tmp = fd.read()
    return json.loads(tmp)
def delete_heat(stack_id):
    try:
        heat_client.stacks.delete(stack_id)
    except Exception as e:
        log.error(str(type(e)) + str(e.args) + str(e.message))
        return False
    return True
def run():
    args = sys.argv
    target = None
    if len(args) <= 1:
        # no arguments: show help
        args.append('help')
    if len(args) > 1:
        commandName = args[1]
        if commandName == 'help':
            commandName = 'uhelp'
        args = args[2:]
    try:
        command = __import__('commands.' + commandName, globals(), locals(),
                             ['run', 'options'])
    except ImportError:
        log.error('Invalid commandName: ' + commandName)
        return
    argInfo = cli.parseArgv(args, command.options)
    if len(argInfo['errors']) > 0:
        for error in argInfo['errors']:
            log.log(error)
        log.error('Invalid command line. Try `ursa help <command>`')
        return
    command.run(argInfo['params'], argInfo['options'])
def loadHandler(self, human, values):
    if values[0] == 'skinTexture':
        (fname, ext) = os.path.splitext(values[1])
        if fname != "texture":
            path = os.path.join(mh.getPath(''), 'data', 'skins', values[1])
            if os.path.isfile(path):
                human.setTexture(path)
            elif ext == ".tif":
                path = path.replace(".tif", ".png")
                human.setTexture(path)
    elif values[0] == 'textures':
        uuid = values[1]
        filepath = values[2]
        if human.hairProxy and human.hairProxy.getUuid() == uuid:
            if not os.path.dirname(filepath):
                proxy = human.hairProxy
                hairPath = os.path.dirname(proxy.file)
                filepath = os.path.join(hairPath, filepath)
            human.hairObj.mesh.setTexture(filepath)
            return
        elif uuid not in human.clothesProxies:
            log.error("Could not load texture for object with uuid %s!" % uuid)
            return
        proxy = human.clothesProxies[uuid]
        if not os.path.dirname(filepath):
            clothesPath = os.path.dirname(proxy.file)
            filepath = os.path.join(clothesPath, filepath)
        self.applyClothesTexture(uuid, filepath)
        return
    elif values[0] == 'eyeTexture':
        self.setEyes(human, values[1])
def init_data(self):
    self.dataset = gdal.Open(self.path, GA_ReadOnly)
    bands = self.dataset.RasterCount
    if bands < self.band:
        log.error("This raster does not contain a band number %d" % self.band)
    self.band = self.dataset.GetRasterBand(self.band)
    self.epsg = projection.get_epsg(self.dataset)
    xmin, xres, rot1, ymin, rot2, yres = self.dataset.GetGeoTransform()
    self.rot1 = rot1
    self.rot2 = rot2
    cols = self.dataset.RasterXSize
    rows = self.dataset.RasterYSize
    xmax = xmin + cols * xres
    ymax = ymin + rows * yres
    # Since xres and yres can be negative, we simply use min/max
    # to select the proper bounding box
    extent = Extent(min(xmin, xmax), min(ymin, ymax),
                    max(xmin, xmax), max(ymin, ymax))
    self._raster_extent = RasterExtent(extent, abs(xres), abs(yres), cols, rows)
    self._data_type = to_datatype_arg(self.band.DataType)
    self.fchar = to_datatype_str(self.band.DataType)
def initShader(self):
    vertexSource = self.path + '_vertex_shader.txt'
    geometrySource = self.path + '_geometry_shader.txt'
    fragmentSource = self.path + '_fragment_shader.txt'
    self.shaderId = glCreateProgram()
    if os.path.isfile(vertexSource):
        self.vertexId = self.createShader(vertexSource, GL_VERTEX_SHADER)
        glAttachShader(self.shaderId, self.vertexId)
    if os.path.isfile(geometrySource) and 'GL_GEOMETRY_SHADER' in globals():
        self.geometryId = self.createShader(geometrySource, GL_GEOMETRY_SHADER)
        glAttachShader(self.shaderId, self.geometryId)
    if os.path.isfile(fragmentSource):
        self.fragmentId = self.createShader(fragmentSource, GL_FRAGMENT_SHADER)
        glAttachShader(self.shaderId, self.fragmentId)
    glLinkProgram(self.shaderId)
    if not glGetProgramiv(self.shaderId, GL_LINK_STATUS):
        log.error("Error linking shader: %s", glGetProgramInfoLog(self.shaderId))
        self.delete()
        return
    self.uniforms = None
    self.updateUniforms()
def getShader(path, cache=None):
    shader = None
    cache = cache or _shaderCache
    path1 = path + '_vertex_shader.txt'
    path2 = path + '_fragment_shader.txt'
    path3 = path + '_geometry_shader.txt'
    paths = [p for p in [path1, path2, path3] if os.path.isfile(p)]
    if not paths:
        cache[path] = False
        return False
    mtime = max(os.path.getmtime(p) for p in paths)
    if path in cache:
        shader = cache[path]
        if shader is False:
            return shader
        if mtime >= shader.modified:
            log.message('reloading %s', path)
            try:
                shader.initShader()
                shader.modified = mtime
            except RuntimeError as text:
                log.error("Error loading shader %s", path, exc_info=True)
                shader = False
def next(self):
    try:
        self.conn.next()
    except Exception as e:
        time.sleep(0.05)
        error("Couldn't process connection: %s" % e)
        self.connect()
def get_ipaddress(instance_ids, subnet_id):
    log.debug("get_ipaddress subnet_id(%s) instance_ids:" % subnet_id)
    log.debug(instance_ids)
    ret = {}
    if not len(instance_ids):
        log.error("instance_ids convert failed.")
        return ret
    for instance_id in instance_ids:
        try:
            while True:
                # sql = "select ip.ip_address from ipallocations as ip, ports as port where ip.port_id=port.id and port.device_id='%s'"
                # tmp = db_neutron.select(db_neutron.get_sql_connection(), sql % instance_id)
                # log.debug("get_ipaddress tmp")
                # log.debug(tmp)
                # if tmp and len(tmp[0]):
                #     ret[instance_id] = tmp[0][0]
                #     break
                ports = neutron_client.list_ports(device_id=instance_id)
                if 'ports' in ports:
                    for port in ports['ports']:
                        if 'fixed_ips' in port and len(port['fixed_ips']):
                            log.debug("port fixed_ips: %s" % str(port['fixed_ips']))
                            for ip in port['fixed_ips']:
                                if ip['subnet_id'] == subnet_id:
                                    ret[instance_id] = ip['ip_address']
                                    log.debug("ipaddr - Condition match")
                                    # raise End to break out of the nested loops
                                    raise End()
                time.sleep(5)
        except End:
            log.debug("ipaddr - %s" % str(ret[instance_id]))
    return ret
def saveSettings(self, promptOnFail=False):
    try:
        if not os.path.exists(mh.getPath('')):
            os.makedirs(mh.getPath(''))
        with outFile("settings.ini") as f:
            f.write(mh.formatINI(self.settings))
        with outFile("shortcuts.ini") as f:
            for action, shortcut in self.shortcuts.iteritems():
                f.write('%d %d %s\n' % (shortcut[0], shortcut[1], action))
        with outFile("mouse.ini") as f:
            for mouseAction, method in self.mouseActions.iteritems():
                f.write('%d %d %s\n' % (mouseAction[0], mouseAction[1], method.__name__))
        if self.dialog is not None:
            self.helpIds.update(self.dialog.helpIds)
        with outFile("help.ini") as f:
            for helpId in self.helpIds:
                f.write('%s\n' % helpId)
    except:
        log.error('Failed to save settings file', exc_info=True)
        if promptOnFail:
            self.prompt('Error', 'Could not save settings file.', 'OK')
def debug_dump():
    try:
        import debugdump
        debugdump.dump.reset()
    except Exception:
        import log
        log.error("Could not create debug dump", exc_info=True)
def run(self):
    while not self.thread_stop:
        try:
            self.fetch_data()
        except Exception as e:
            log.error('Something wrong in running ips daemon: %r' % e)
        time.sleep(self.interval)
def put(self):
    """
    Save a new record.
    """
    try:
        data = request.json['data']
        good_stub = self.fill_obj(data)
        obj = self.pre_save(good_stub, data)
        obj = self.save_model(obj)
        db.session.flush()
        self.post_save(obj, data, create_new=True)
        db.session.commit()
    except BaseCanoniseResource.CanonException as exc:
        debug(unicode(exc))
        db.session.rollback()
        abort(400, message=unicode(exc))
    except Exception as exc:
        error(u"Error while creating a record of model %s. %s",
              unicode(self.model.__class__.__name__), unicode(exc))
        db.session.rollback()
        raise
    else:
        pass  # db.session.commit()
    return obj
def onFileSelected(filename):
    path = os.path.normpath(os.path.join(exportPath, filename))
    dir, name = os.path.split(path)
    name, ext = os.path.splitext(name)
    if not os.path.exists(dir):
        os.makedirs(dir)

    def filename(targetExt, different=False):
        if not different and ext != '' and ('.' + targetExt.lower()) != ext.lower():
            log.warning("expected extension '.%s' but got '%s'", targetExt, ext)
        return os.path.join(dir, name + '.' + targetExt)

    found = False
    for exporter, radio, options in self.formats:
        if radio.selected:
            exporter.export(gui3d.app.selectedHuman, filename)
            found = True
            break
    if not found:
        log.error("Unknown export format selected!")
        return
    gui3d.app.prompt('Info', u'The mesh has been exported to %s.' % dir,
                     'OK', helpId='exportHelp')
    mh.changeCategory('Modelling')
def loadProxy(human, path, type="Clothes"):
    try:
        npzpath = os.path.splitext(path)[0] + '.mhpxy'
        try:
            if not os.path.isfile(npzpath):
                log.message('compiled proxy file missing: %s', npzpath)
                raise RuntimeError('compiled proxy file missing: %s' % npzpath)
            if os.path.isfile(path) and os.path.getmtime(path) > os.path.getmtime(npzpath):
                log.message('compiled proxy file out of date: %s', npzpath)
                raise RuntimeError('compiled file out of date: %s' % npzpath)
            proxy = loadBinaryProxy(npzpath, human, type)
        except Exception as e:
            showTrace = not isinstance(e, RuntimeError)
            log.warning("Problem loading binary proxy: %s", e, exc_info=showTrace)
            proxy = loadTextProxy(human, path, type)    # TODO perhaps proxy type should be stored in .mhclo file too
            if getpath.isSubPath(npzpath, getpath.getPath()):
                # Only write compiled binary proxies to user data path
                try:
                    saveBinaryProxy(proxy, npzpath)
                except StandardError:
                    log.notice('unable to save compiled proxy: %s', npzpath, exc_info=True)
            else:
                log.debug('Not writing compiled proxies to system paths (%s).', npzpath)
    except:
        log.error('Unable to load proxy file: %s', path, exc_info=True)
        return None
    return proxy
def main():
    if len(sys.argv) > 1:
        mode = sys.argv[1]
    else:
        mode = 'sim'
    defaults.add_epsilon_as_inset = True
    sim = TriHoles2D(
        material='SiN',
        radius=0.34,
        numbands=4,  # 8,
        k_interpolation=5,  # 31,
        resolution=16,
        mesh_size=7,
        runmode=mode,
        num_processors=2,
        save_field_patterns=False,
        convert_field_patterns=False)
    if not sim:
        log.error('an error occurred during simulation. See the .out file')
        return
    log.info(' ##### success! #####\n\n')
def __init__(self, screen, layout, channel, playerid):
    self.song = None
    self.time = 0
    self.paused = 0
    self.stopped = 1
    self.playerid = playerid
    self.keybindings = config.keybindings.general
    self.songformat = config.playerwindow.songformat
    self.playerinfofile = config.general.playerinfofile
    self.songchangecommand = config.general.songchangecommand.strip()
    if self.songchangecommand:
        self.songchangecommand = "( %s )&" % self.songchangecommand
    h, w, y, x, border = layout
    window.window.__init__(self, screen, h, w, y, x,
                           config.colors.playerwindow, _("Playback Info"), border)
    try:
        self.playerinfofd = open(self.playerinfofile, "w")
    except IOError as e:
        log.error(_("error '%s' occurred during write to playerinfofile") % e)
        self.playerinfofd = None
def encode_SavetoDB(id):
    try:
        cursor = conn.cursor()
        sql = """SELECT img_path,ctid FROM image WHERE id=%s and octet_length(img_encoding) < 10"""
        cursor.execute(sql, (str(id),))
        logger.info("Select {} Rows".format(cursor.rowcount))
        rows = cursor.fetchall()
        logger.debug(type(rows))
        logger.debug(rows)
        for row in rows:
            ctid = str(row[1])
            img_path = str(row[0])
            logger.info("ctid = {0}, image_path = {1}".format(ctid, img_path))
            _encoding = encode_face_image(str(row[0]))
            # logger.debug(type(_encoding))
            # logger.debug(_encoding)
            # before write encoding to database
            encoding = cPickle.dumps(_encoding)
            logger.debug(type(encoding))
            logger.debug(encoding)
            try:
                sql = """UPDATE image SET img_encoding = %s WHERE ctid = %s"""
                cur = conn.cursor()
                cur.execute(sql, (pg.Binary(encoding), ctid))
                conn.commit()
                logger.info("Update {} Row Successfully".format(cur.rowcount))
            except pg.Error as e:
                logger.error(e.pgerror)
        logger.info("Finished Update Image Encoding DB!!!")
    except pg.Error as e:
        logger.error(e.pgerror)
def getAllNotes(fileNames):
    """ Get all the notes and chords from the MIDI files in the ./midi_songs directory """
    notes = []
    songID = 1
    for fileName in glob.glob(fileNames):
        MIDI = song.getMIDI(fileName)
        print((str(songID) + ". Parsing %s") % fileName)
        songID += 1
        musicElements = None
        try:
            # fileName has instrument parts
            s2 = instrument.partitionByInstrument(MIDI)
            musicElements = s2.parts[0].recurse()
        except:
            # fileName has notes in a flat structure
            musicElements = MIDI.flat.notes
        name, mode = song.getScale(MIDI)
        halfSteps = 0  # default: no transposition for other modes
        if mode == "major":
            halfSteps = majors[name]
        elif mode == "minor":
            halfSteps = minors[name]
        for element in musicElements:
            if isinstance(element, note.Note):
                transposedNote = str(element.transpose(halfSteps).pitch)
                notes.append(transposedNote)
            elif isinstance(element, chord.Chord):
                transposedChord = element.transpose(halfSteps)
                notes.append('.'.join(str(n) for n in transposedChord.normalOrder))
            else:
                log.error(f'{element} is an anomaly.')
    with open(f'../data/notes/{fileNames.split("/")[-3]}/notes', 'wb+') as filepath:
        pickle.dump(notes, filepath)
    return notes
def write(self):
    status = 0
    desc = ''
    index_name = self.index_name + '-' + (
        datetime.datetime.now() + datetime.timedelta(seconds=120)).strftime('%Y%m%d%H')
    last_index = self.index_name + '-' + (
        datetime.datetime.now() - datetime.timedelta(seconds=7200)).strftime('%Y%m%d%H')
    try:
        url = os.path.join(self.addr, '%s/type_blog/id_123' % index_name)
        doc = {'doc': {'timestamp': time.time()}}
        res = requests.post(url, json=doc)
        res_json = res.json()
        if res.status_code < 300 and 'result' in res_json and \
                res_json['result'] in ('updated', 'created'):
            log.info('%s index:%s' % (res_json['result'], index_name))
            # delete the old index when a new one has been created
            if res_json['result'] == 'created':
                # retry up to 3 times on failure
                i = 0
                while True:
                    i += 1
                    res = requests.delete(os.path.join(self.addr, last_index))
                    if (i > 3) or (res.status_code == 200
                                   and 'acknowledged' in res.json()
                                   and res.json()['acknowledged']):
                        break
            status = 100
        else:
            log.error('write fail, header:%s, body:%s' % (str(res.headers), res.content))
    except Exception as e:
        log.exception(e)
        status = 0
        desc = "ES ERROR %s" % e
    status = float(status)
    return base.metric('status', status, desc=desc),
def quickSave(self):
    path = os.path.join(self.dirName, self.fileName)
    overwrite = True
    dialog = gui.Dialog()
    if not path.lower().endswith('.target'):
        error_msg = 'Cannot save target to file: {0:s}\nExpected a path to a .target file'.format(path)
        dialog.prompt(title='Error', text=error_msg, button1Label='OK')
        log.error('Cannot save targets to %s. Not a .target file.', path)
        return
    else:
        if os.path.exists(path):
            msg = 'File {0:s} already exists. Overwrite?'.format(path)
            overwrite = dialog.prompt(title='Warning', text=msg,
                                      button1Label='YES', button2Label='NO')
            if overwrite:
                log.message('Overwriting %s ...', path)
    if overwrite:
        self.saveTargets(path, self.stripBaseTargets.selected)
def wosd1_revert(self):
    try:
        osd_names = subprocess.Popen(
            ("ceph osd tree | grep host | awk '{print $4}'"),
            shell=True, stdout=subprocess.PIPE, stderr=None)
        output_osd, error = osd_names.communicate()
        get_osd = output_osd.split()
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(get_osd[0])
        stdin, stdout, stderr = ssh.exec_command(
            "cd /var/lib/ceph/osd/* ;sudo rm ceph_fsid1")
        if stdout.channel.recv_exit_status() == 0:
            log.info("Fake fsid file is deleted")
    except Exception as e:
        log.error(e)
def __new__(cls, *args, **kwargs):
    self = super(Texture, cls).__new__(cls)
    if cls._npot is None:
        cls._npot = glInitTextureNonPowerOfTwoARB()
        try:
            import debugdump
            debugdump.dump.appendMessage(
                "GL.EXTENSION: GL_ARB_texture_non_power_of_two %s"
                % ("enabled" if cls._npot else "not available"))
        except Exception as e:
            log.error("Failed to write GL debug info to debug dump: %s", str(e))
    if cls._powers is None:
        cls._powers = [2**i for i in range(20)]
    self.textureId = glGenTextures(1)
    self.width = 0
    self.height = 0
    self.modified = None
    return self
def mba_set(self, sockets, cos_id, mb_max):
    """
    Configures MBA for CoS

    Parameters:
        sockets: sockets list on which to configure MBA
        cos_id: Class of Service
        mb_max: MBA rate to set

    Returns:
        0 on success
        -1 otherwise
    """
    try:
        cos = self.mba.COS(cos_id, mb_max)
        for socket in sockets:
            self.mba.set(socket, [cos])
    except Exception as ex:
        log.error(str(ex))
        return -1
    return 0
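# Usage sketch for mba_set() (hypothetical caller and values; libpqos
# expresses MBA rates as a percentage of memory bandwidth):
#
#     rc = rdt_iface.mba_set(sockets=[0, 1], cos_id=1, mb_max=50)
#     if rc != 0:
#         log.error("MBA configuration failed")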
def ecom5_err_revert(self):
    try:
        conf_file = self.cluster_name + ".conf"
        ceph_fsid_1 = subprocess.check_output(['ceph', 'fsid']).strip('\n')
        with open('/etc/ceph/%s' % (conf_file), 'r') as conf:
            filedata = conf.read()
        filedata = filedata.replace('random_val', ceph_fsid_1)
        with open('/etc/ceph/%s' % (conf_file), 'w') as conf:
            conf.write(filedata)
        ceph_fsid_2 = subprocess.check_output(['ceph', 'fsid']).strip('\n')
        if ceph_fsid_1 == ceph_fsid_2:
            log.info("modified fsid is replaced with original fsid")
    except Exception as e:
        log.error(e)
async def new_cmd(message: discord.Message) -> None:
    nonlocal prefixes, trigger, aliases
    if prefixes is None:
        prefixes = client.prefixes
    # "self" shouldn't be defined, yes? PyCharm thinks otherwise...
    result, this_prefix = check_prefix(message.content, prefixes)
    if not result:
        return
    else:
        # No way to do a case-insensitive replace so we simply chop the
        # first X characters with the prefix
        command = message.content[len(this_prefix):]
    # We can literally just run everything we did with prefixes with the command as well
    triggers = [trigger]
    triggers.extend(aliases)
    is_us, _ = check_prefix(command, triggers)
    if not is_us:
        return
    else:
        # we're now guaranteed to run this function, we're home clear now
        client.command_count += 1
        client.debug_response_trace(flag=True)
    try:
        await func(command=command, message=message)
        # PyCharm thinks these are unexpected, but they're not
    except Exception:
        stackdump = traceback.format_exc()
        embed = discord.Embed(
            title="Internal error",
            description=f"There was an error processing the command. Here's the stack trace, if necessary (this is also recorded in the log):\n```{stackdump}```",
            colour=0xf00000)
        embed = embed.set_footer(
            text="Error occurred at " + str(datetime.datetime.utcnow()))
        await message.channel.send(embed=embed)
        log.error(f"Error processing command: {message.content}",
                  include_exception=True)
def __login(self):
    try:
        # Retry:
        #   1. retry because of a network problem
        # Re-register:
        #   2. retry because the account was banned
        #   3. retry because account authentication failed
        warning(f"Login -> [{self.usr}:{self.pwd}]")
        self.__login_payload.update({
            "password": self.pwd,
            "username": self.usr
        })
        resp = self.__refresh_new_target(
            self.session.post(
                self.__login_url,
                data=self.__login_payload,
                verify=False,
                timeout=120,
            ))
        debug(resp.history)
        debug(resp.request.headers)
        debug(resp)
        if self.usr in resp.text and "暗网欢迎您" in resp.text:
            success("Auth Success")
            self.types = Parser.get_current_type(resp)
        else:
            error(f"Auth Failed: {self.__clean_log(resp)}")
            self.__save_error("__login.html", resp)
            if re.findall("已被封禁|无效的|违规被处理", resp.text):
                Cursor.ban_user(self.usr)
                if not self.__reg():
                    return
            else:
                raise ValueError
    except KeyboardInterrupt:
        exit()
def loadPose(self, filepath, apply_pose=True):
    self.currentPose = filepath
    if not filepath:
        self.human.resetToRestPose()
        return
    if os.path.splitext(filepath)[1].lower() == '.mhp':
        anim = self.loadMhp(filepath)
    elif os.path.splitext(filepath)[1].lower() == '.bvh':
        anim = self.loadBvh(filepath, convertFromZUp="auto")
    else:
        log.error("Cannot load pose file %s: File type unknown." % filepath)
        return
    #self.human.setAnimateInPlace(True)
    self.human.addAnimation(anim)
    self.human.setActiveAnimation(anim.name)
    self.human.setToFrame(0, update=False)
    if apply_pose:
        self.human.setPosed(True)
def l3ca_set(self, sockets, cos_id, ways_mask):
    """
    Configures L3 CAT for CoS

    Parameters:
        sockets: sockets list on which to configure L3 CAT
        cos_id: Class of Service
        ways_mask: L3 CAT CBM to set

    Returns:
        0 on success
        -1 otherwise
    """
    try:
        cos = self.l3ca.COS(cos_id, ways_mask)
        for socket in sockets:
            self.l3ca.set(socket, [cos])
    except Exception as ex:
        log.error(str(ex))
        return -1
    return 0
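# Usage sketch for l3ca_set() (hypothetical values): a contiguous capacity
# bitmask of 0xF grants the four lowest cache ways to CoS 2 on socket 0.
#
#     rc = rdt_iface.l3ca_set(sockets=[0], cos_id=2, ways_mask=0xF)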
def dolua(cmd_args):
    opts, args = getopt.getopt(
        cmd_args, "p:s:f:",
        ["process=", "script=", "func=", "abi=", "x86-arm", "update"])
    log.info("opts %s args:%s" % (opts, args))
    process_name, abi, lua_script, func_name = "", "x86", "", ""
    need_update, x86_arm, zygote = False, False, False
    for op, value in opts:
        if op == "-s" or op == "--script":
            lua_script = value
        elif op == "-f" or op == "--func":
            func_name = value
        elif op == "-p" or op == "--process":
            process_name = value
        elif op == "--abi":
            abi = value
        elif op == "--x86-arm":
            x86_arm = True
        elif op == "--update":
            need_update = True
        else:
            log.error("unknown opt:%s value:%s" % (op, value))
            return False
    if len(opts) == 0:
        process_name = args[0] if len(args) >= 1 else ""
        lua_script = args[1] if len(args) >= 2 else ""
        func_name = args[2] if len(args) >= 3 else ""
        abi = args[3] if len(args) >= 4 else ""
    ret, process_id, remote_script, remote_loader, remote_inject_so = Command.lua_check(
        process_name, lua_script, zygote, abi, x86_arm, need_update)
    if not ret:
        return False
    shell_cmd = '"%s" luacall --pid="%s" --so="%s" --script="%s" --func="%s" ' % \
        (remote_loader, process_id, remote_inject_so, remote_script, func_name)
    shell_cmd = util.getshell(shell_cmd)
    if not util.execute_cmd(shell_cmd):
        return False
    return True
def get_up_time(bs_data, current_year):
    try:
        to_current_year_datetime = moment.date(
            f"{current_year} "
            + bs_data.select_one("tr:nth-child(3) > td:nth-child(6)").text)
        real_up_time_tree = bs_data.select_one(".author")
        real_up_time_tree.a.extract()
        real_up_time_tree.span.extract()
        real_up_time = moment.date(
            real_up_time_tree.text.replace("年", "")
            .replace("月", "")
            .replace("日", ""))
        real_up_time = (real_up_time
                        if real_up_time._date else to_current_year_datetime)
        return debug(real_up_time)
    except Exception as e:
        error(f"[Parser->get_up_time]: {e}")
        return moment.now()
def pull(cmd_args):
    opts, args = getopt.getopt(
        cmd_args, "r:l:n:", ["remote-file=", "local-path=", "local-name="])
    log.info("opts %s args:%s" % (opts, args))
    remote_file, local_path, local_name = "", "", ""
    for op, value in opts:
        if op == "-r" or op == "--remote-file":
            remote_file = value
        elif op == "-l" or op == "--local-path":
            local_path = value
        elif op == "-n" or op == "--local-name":
            local_name = value
        else:
            log.error("unknown opt:%s value:%s" % (op, value))
            return False
    if len(opts) == 0:
        remote_file = args[0] if len(args) >= 1 else ""
        local_path = args[1] if len(args) >= 2 else ""
        local_name = args[2] if len(args) >= 3 else ""
    if local_path == "":
        local_path = "./"
    if not os.path.exists(local_path):
        os.makedirs(local_path)
    if util.check_dir(remote_file):
        # remote path is a directory
        local_file = local_path + "/"
    else:
        remote_fname = os.path.basename(remote_file)
        if local_name == "":
            local_name = remote_fname
        local_file = os.path.join(local_path, local_name)
    log.info("remote:%s local:%s" % (remote_file, local_file))
    shell_cmd = util.getcmd('pull "%s" "%s"' % (remote_file, local_file))
    ret, res_str = util.execute_cmd_with_stdout(shell_cmd)
    if not ret:
        return False
    return True
def refresh_mba_bw_status(self):
    """
    Reads MBA BW status from libpqos and saves the result in the shared dict

    Returns:
        0 on success
        -1 otherwise
    """
    try:
        supported, enabled = self.cap.is_mba_ctrl_enabled()
        # convert None to False
        supported = bool(supported)
        self.shared_dict['mba_bw_supported'] = supported
        self.shared_dict['mba_bw_enabled'] = enabled
    except Exception as ex:
        log.error("libpqos is_mba_ctrl_enabled(..) call failed!")
        log.error(str(ex))
        return -1
    return 0
def finish(self, stagedRepositoryId):
    '''
    Entry point to trigger staging repository close
    (finish deploy and close staging repository).
    '''
    log.info('CLOSE staging repository id {}'.format(stagedRepositoryId))
    url = '{}/profiles/{}/{}'.format(self._restApiUrl, self._profileId, 'finish')
    headers = {}
    headers['Cache-Control'] = 'no-cache'
    headers['Content-Type'] = 'application/xml'
    response = self._request(url, headers,
                             byteRequest=bytearray(self._buildRequest({'stagedRepositoryId': stagedRepositoryId})))
    if response == '':
        def checkStatus():
            return self.getrepositorystatus(stagedRepositoryId)

        def isStatusValid(status):
            if 'type' in status and status['type'] == 'closed':
                log.info('\trepository status is \'{}\''.format(status['type']))
                return 0
            if 'notifications' in status and status['notifications'] != '0':
                log.error('\tlast operation on nexus server failed with {} notifications'.format(status['notifications']))
                return 1
            log.warning('\trepository is not closed yet... Nexus still working; expected status \'{}\' got status \'{}\''.format('closed', status['type']))
            return 2

        if self._requestManyTimes(checkStatus, isStatusValid,
                                  nbTry=tuning.CLOSE_REQ_ATTEMPTS,
                                  waitSecond=tuning.CLOSE_DELAY_BETWEEN_REQ_ATTEMPTS,
                                  cbLogMessage=lambda tryNumber: log.info('\ttry {} to check if staging repository is closed on nexus'.format(tryNumber))):
            log.info('staging repository {} closed'.format(stagedRepositoryId))
            return True
    if response:
        log.error(response)
    self._logActivityForStep(stagedRepositoryId)
    log.error('fail to close staging repository {}'.format(stagedRepositoryId))
    return False
def do_stat():
    log.initialize(
        co.logname(__file__, instance=str(args.sex)),
        file_level="debug",
        console_level="info",
    )
    try:
        msg = "sex: {} min_size: {} max_size: {} max_rtg: {} max_rtg_dif: {}".format(
            args.sex, args.min_size, args.max_size,
            args.max_rating, args.max_rating_dif)
        log.info(__file__ + " started {}".format(msg))
        dba.open_connect()
        oncourt_players.initialize(yearsnum=8)
        min_date = tt.now_minus_history_days(history_days_personal(args.sex))
        ratings_std.initialize(sex=args.sex,
                               min_date=min_date - datetime.timedelta(days=7))
        initialize_results(
            args.sex,
            max_rating=args.max_rating,
            max_rating_dif=args.max_rating_dif,
            min_date=min_date,
        )
        maker = StatMaker(args.sex, min_size=args.min_size, max_size=args.max_size)
        maker.process_all(aspect=RECOVERY_ASPECT)
        log.info(__file__ + " done {}".format(RECOVERY_ASPECT))
        maker.process_all(aspect=KEEP_ASPECT)
        log.info(__file__ + " done {}".format(KEEP_ASPECT))
        dba.close_connect()
        log.info(__file__ + " finished sex: {}".format(args.sex))
        return 0
    except Exception as err:
        log.error("{0} [{1}]".format(err, err.__class__.__name__), exc_info=True)
        return 1
def main():
    """
    Main entry point
    """
    # parse command line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config', metavar="PATH",
                        default=common.CONFIG_FILENAME,
                        help="Configuration file path")
    parser.add_argument('--port', metavar=("PORT"), default=[5000],
                        type=int, nargs=1, help="REST API port")
    parser.add_argument('-V', '--verbose', action='store_true',
                        help="Verbose mode")
    cmd_args = parser.parse_args()

    # configure syslog output
    syslog.openlog("AppQoS")

    if cmd_args.verbose:
        log.enable_verbose()

    # initialize libpqos/Intel RDT interface
    result = common.PQOS_API.init()
    if result != 0:
        log.error("libpqos initialization failed, Terminating...")
        return

    # initialize capabilities
    result = caps.caps_init()
    if result == 0:
        # initialize main logic
        app_qos = AppQoS()

        # start REST API server
        server = rest.Server()
        result = server.start("127.0.0.1", cmd_args.port[0], cmd_args.verbose)
        if result == 0:
            # run main logic
            app_qos.run(cmd_args)

            # stop REST API server
            server.terminate()
        else:
            log.error("Failed to start REST API server, Terminating...")
    else:
        log.error("Required capabilities not supported, Terminating...")

    # de-initialize libpqos
    common.PQOS_API.fini()
def get_rem_by_id(self, rem_id: RemId) -> Union[Rem, None]:
    """Get a Rem by its ID."""
    rem: Optional[Rem] = None
    log.debug(f"Searching Rem (Id '{rem_id}')...")
    # search in cache
    try:
        rem = self._rem_cache[rem_id]
        assert rem
    except KeyError:
        pass
    else:
        log.debug(f"Got Rem: '{rem.name[0]}' (#{rem.rem_id}) from cache.")
        return rem
    # define request
    endpoint: HttpUrl = RemNoteV0Client.base_url + "/get"
    data = {
        "apiKey": self._key,
        "userId": self._user_id,
        "remId": rem_id,
    }
    # post data
    response: Response = self._session.post(endpoint, data)
    # check response status
    if response.status_code != 200:
        log.error(f"Failed to fetch Rem. Error {response.status_code}")
        raise HTTPError
    found = response.json().pop("found", False)
    if not found:
        log.warn(f"No Rem found with Id '{rem_id}' in current scope.")
    else:
        rem = Rem(**response.json())
        self._rem_cache[rem.rem_id] = rem  # add to cache
        log.debug(f"Got Rem: '{rem.name[0]}' (#{rem.rem_id}) from server.")
    return rem
def execute_cmd(self, cmd, consumeoutput=True, logfile=None, mode='w', wait_time=None):
    try:
        log.debug(cmd)
        channel, _, _ = self.inner_execute_cmd(to_bytes(cmd))
        result = bytes()
        data = channel.recv(self.DEFAULT_BUFFER_SIZE)
        l = None
        if logfile:
            l = open(logfile, mode)

        def timerstop(spread=2):
            threading.Event().wait(spread)
            channel.close()

        if wait_time:
            threading.Thread(target=timerstop, args=(wait_time,)).start()
        while (data or not channel.closed):
            if consumeoutput:
                log.info(to_text(data))
            else:
                result += data
            if l:
                l.write(to_text(data))
            data = channel.recv(self.DEFAULT_BUFFER_SIZE)
        stat = channel.recv_exit_status()
        if channel:
            del channel
        if l:
            safe_doing(l.close)
        return stat, result
    except BaseException:
        log.error(traceback.format_exc())
        return None, None
def install(self):
    result = 'FAILED'
    patch_files = self.get_patch_files(self.patch_path, self.filters)
    if not patch_files:
        logger.error('No files in %s' % self.patch_path)
    for absolute_path, relative_path in patch_files:
        # openstack_installed_file is the full install path
        openstack_installed_file = os.path.join(
            self.openstack_install_path, relative_path)
        copy_dir = os.path.dirname(openstack_installed_file)
        ssh = SSHConnection(self.host_ip, 'root', 'Huawei@CLOUD8!')
        try:
            if not stat.S_ISDIR(ssh.get_sftp().stat(copy_dir).st_mode):
                log.info('create dir: %s' % copy_dir)
                ssh.get_sftp().mkdir(copy_dir)
            ssh.put(absolute_path, openstack_installed_file)
        except IOError as e:
            if e.args[1] == 'No such file':
                log.info('There is no such dir in host: %s, create dir: %s'
                         % (self.host_ip, copy_dir))
                cmd = "mkdir -p %s" % copy_dir
                utils.remote_execute_cmd(host_ip=self.host_ip, user="******",
                                         passwd="Huawei@CLOUD8!", cmd=cmd)
            else:
                error_message = 'Exception occurred when copying file <%s>, Exception.args: %s' \
                    % (openstack_installed_file, e.args)
                log.error(error_message)
                print(error_message)
        except Exception as e:
            error_message = 'Exception occurred when copying file <%s>, Exception.args: %s' \
                % (openstack_installed_file, e.args)
            log.error(error_message)
            print(error_message)
def initShader(self):
    vertexSource = self.path + '_vertex_shader.txt'
    geometrySource = self.path + '_geometry_shader.txt'
    fragmentSource = self.path + '_fragment_shader.txt'
    self.shaderId = glCreateProgram()
    self.defineables = []
    if os.path.isfile(vertexSource):
        self.vertexId = self.createShader(vertexSource, GL_VERTEX_SHADER,
                                          self.defines, self.defineables)
        if self.vertexId is None:
            raise RuntimeError("No vertex shader program compiled, cannot set vertex shader. (%s)" % vertexSource)
        glAttachShader(self.shaderId, self.vertexId)
    if os.path.isfile(geometrySource) and 'GL_GEOMETRY_SHADER' in globals():
        self.geometryId = self.createShader(geometrySource, GL_GEOMETRY_SHADER,
                                            self.defines, self.defineables)
        if self.geometryId is None:
            raise RuntimeError("No geometry shader program compiled, cannot set geometry shader. (%s)" % geometrySource)
        glAttachShader(self.shaderId, self.geometryId)
    if os.path.isfile(fragmentSource):
        self.fragmentId = self.createShader(fragmentSource, GL_FRAGMENT_SHADER,
                                            self.defines, self.defineables)
        if self.fragmentId is None:
            raise RuntimeError("No fragment shader program compiled, cannot set fragment shader. (%s)" % fragmentSource)
        glAttachShader(self.shaderId, self.fragmentId)
    glLinkProgram(self.shaderId)
    if not glGetProgramiv(self.shaderId, GL_LINK_STATUS):
        log.error("Error linking shader: %s", glGetProgramInfoLog(self.shaderId))
        self.delete()
        return
    self.vertexTangentAttrId = glGetAttribLocation(self.shaderId, 'tangent')
    self.uniforms = None
    self.glUniforms = []
    self.updateUniforms()
def collect_english(self) -> tuple:
    """
    Collect English words and their Japanese translations from the site.

    return:
        english_info: English words
        japanese_info: Japanese translations
    """
    english_info = []
    japanese_info = []
    for url in self.english_traget:
        try:
            DRIVER.get(url)
        except Exception:
            log.error('collect_english: target url is None')
            continue
        try:
            english_text = [
                eng.text
                for eng in DRIVER.find_elements_by_class_name("eng")
            ]
            japanese_text = [
                jap.text
                for jap in DRIVER.find_elements_by_class_name("jap")
            ]
        except NoSuchElementException:
            log.error('collect_english: target text is None')
            return None
        english_info.extend(english_text)
        japanese_info.extend(japanese_text)
    DRIVER.close()
    DRIVER.quit()
    return english_info, japanese_info
def scp(self, local_path, remote_path, username=None, permissions=None):
    '''
    Method to copy a local file onto a remote host.
    Overriding the user allows copying the file into a location with
    restricted permissions.
    '''
    user = self.username
    if username:
        user = username
    log.debug("Copy local file '%s' on remote host '%s' in '%s' as '%s'"
              % (local_path, self.host, remote_path, user))
    if not os.path.isfile(local_path):
        log.error("Local file '%s' does not exist" % local_path)
    sftp_client = self.get_sftp_client()
    # copy local file on remote host in a temp file
    tmp_remote_path = "/tmp/%s" % ''.join(
        random.choice(string.ascii_lowercase + string.digits) for _ in range(15))
    with open(local_path) as local_file, \
            sftp_client.file(tmp_remote_path, mode='w+') as remote_file:
        remote_file.write(local_file.read())
    # mv this file in the final destination, with the requested user
    self.run_cmd("mv %s %s" % (tmp_remote_path, remote_path),
                 username=user, debug=None)
    # file will be owned by the specified user
    if username:
        self.run_cmd("sudo chown %s:%s %s" % (username, username, remote_path),
                     debug=None)
    if permissions:
        self.run_cmd("sudo chmod %s %s" % (permissions, remote_path),
                     debug=None)
def nextIpAddress(self):
    self.current_address = None
    scans = 0
    while self.iterator < self.range[-1]:
        self.iterator += 1
        address = IpAddress(str(IPv4Address(self.iterator)),
                            default_helo_host=self.helo_host, subnet=self)
        if address.ip_address in self.blacklist:
            error("%s is blacklisted", address.ip_address)
            continue
        self.current_address = address
        self.current_messages = 0
        self.last_request = 0
        info(self)
        return self.current_address
    #cur = self.range[0]
    #debug("End of subnet reached resetting")
    #while cur < self.iterator:
    #    address = IpAddress(str(IPv4Address(cur)), default_helo_host=self.helo_host, subnet=self)
    #    if address.ip_address in self.blacklist:
    #        error("%s is blacklisted", address.ip_address)
    #        continue
    #    self.current_address = address
    #    self.current_messages = 0
    #    self.last_request = 0
    #    self.interator = cur
    #    info(self)
    #    return self.current_address
    self.exhausted = True
    return None
def show_errors(tour_events, daysago, n_warns):
    """return True if stop work, False if ok or ignore"""
    sex_match_err_list = []
    for tour_evt in tour_events:
        if tour_evt.tour_id is None:
            continue
        for match in tour_evt.matches:
            if (
                match.rnd is not None
                and match.date is not None
                and match.first_player is not None
                and match.first_player.ident is not None
                and match.second_player is not None
                and match.second_player.ident is not None
                and hasattr(match, "detailed_score")
                and match.detailed_score is not None
                and match.detailed_score.error > 0
                and match.detailed_score.error != error_code("RAIN_INTERRUPT")
            ):
                sex_match_err_list.append(
                    (tour_evt.sex, match, match.detailed_score.error)
                )
    if len(sex_match_err_list) > 0 or n_warns > 0:
        for sex_match_err in sex_match_err_list:
            sex, match, err = sex_match_err
            log.error("{} {} err:{}".format(sex, match, err))
        if args.interactive:
            msg = (
                "day-{} {} dscore error(s) found, {} warns. Continue? "
                + "(don't forget switch to browser)"
            ).format(daysago, len(sex_match_err_list), n_warns)
            winsound.MessageBeep(winsound.MB_ICONEXCLAMATION)
            is_continue_ask = cmds.CommandAsk(msg)
            answer = is_continue_ask.fire()
            if answer == "n":
                return True
    return False
def configure_rdt():
    """
    Configure RDT

    Returns:
        0 on success
    """
    result = 0
    # detect removed pools
    old_pools = Pool.pools.copy()
    pool_ids = common.CONFIG_STORE.get_pool_attr('id', None)
    if old_pools:
        for pool_id in old_pools:
            if not pool_ids or pool_id not in pool_ids:
                log.debug("Pool {} removed...".format(pool_id))
                Pool(pool_id).cores_set([])
                # remove pool
                Pool.pools.pop(pool_id)
    if not pool_ids:
        log.error("No Pools to configure...")
        return -1
    for pool_id in pool_ids:
        Pool(pool_id)
    for pool_id in Pool.pools:
        result = Pool(pool_id).configure()
        if result != 0:
            return result
    result = Apps().configure()
    return result