def create_folder(path=None):
    p = g.wiki.root()
    if path is not None:
        try:
            p = g.wiki.get(path)
        except PathNotFound:
            abort(404)
    if not p.is_node:
        abort(500)
    if not p.editable:
        abort(500)
    foldername = None
    if request.form.get('save', None):
        foldername = request.form.get('foldername')
        try:
            p.create_directory(foldername)
            flash('New folder created', 'success')
            return redirect(url_for('view', path=p.path))
        except PathExists:
            app.logger.error(format_exc())
            flash('There is already a folder by that name', 'error')
        except Exception, e:
            app.logger.error(format_exc())
            flash('There was a problem creating your folder: %s' % e, 'error')
def track_users(ids):
    """ Track users by id, writing to jsons folder. """
    print 'tracking', len(ids), 'users'
    outf = io.open(make_output_file(), mode='wt', encoding='utf8')
    count = 0
    for tweet in twutil.collect.track_user_ids(ids):
        try:
            outf.write(json.dumps(tweet, ensure_ascii=False, encoding='utf8'))
            outf.write(u'\n')
            outf.flush()
            count += 1
            if count > 100000:
                outf.close()
                outf = io.open(make_output_file(), mode='wt', encoding='utf8')
                count = 0
        except:
            e = sys.exc_info()
            print 'skipping error', e[0]
            print traceback.format_exc()
            twutil.collect.reinit()
            outf.close()
            track_users(ids)
    outf.close()
def _get_grpc_custom_bdist(decorated_basename, target_bdist_basename):
    """Returns a string path to a bdist file for Linux to install.

    If we can retrieve a pre-compiled bdist from online, uses it. Else, emits a
    warning and builds from source.
    """
    # TODO(atash): somehow the name that's returned from `wheel` is different
    # between different versions of 'wheel' (but from a compatibility standpoint,
    # the names are compatible); we should have some way of determining name
    # compatibility in the same way `wheel` does to avoid having to rename all of
    # the custom wheels that we build/upload to GCS.
    # Break import style to ensure that setup.py has had a chance to install the
    # relevant package.
    from six.moves.urllib import request
    decorated_path = decorated_basename + GRPC_CUSTOM_BDIST_EXT
    try:
        url = BINARIES_REPOSITORY + '/{target}'.format(target=decorated_path)
        bdist_data = request.urlopen(url).read()
    except IOError as error:
        raise CommandError(
            '{}\n\nCould not find the bdist {}: {}'
            .format(traceback.format_exc(), decorated_path, error.message))
    # Our chosen local bdist path.
    bdist_path = target_bdist_basename + GRPC_CUSTOM_BDIST_EXT
    try:
        with open(bdist_path, 'w') as bdist_file:
            bdist_file.write(bdist_data)
    except IOError as error:
        raise CommandError(
            '{}\n\nCould not write grpcio bdist: {}'
            .format(traceback.format_exc(), error.message))
    return bdist_path
def process(service_name):
    try:
        print service_name
        jsonData = json.loads(request.data)
        print request.data
        action = jsonData["action"]
        isAllow = False
        for item in utils.config.ALLOW_SERVICE.split(','):
            if service_name == item:
                isAllow = True
        if service_name is None or service_name == '' or not isAllow:
            return 'No such Service'
        result = ''
        if action == 'ACTION_SERVICE_CREATE':
            result = service.serverService.action_service_create(service_name, jsonData)
        elif action == 'ACTION_SERVICE_UPDATE':
            result = service.serverService.action_service_update(service_name, jsonData)
        elif action == 'ACTION_CLIENT_SERVICE_CREATE':
            result = service.serverService.action_client_service_create(service_name, jsonData)
        elif action == 'ACTION_SERVICE_FOLLOWED_CHANGE':
            result = service.serverService.action_service_followed_change(service_name, jsonData)
        elif action == 'ACTION_SERVICE_DING':
            result = service.serverService.action_service_ding(action, jsonData)
    except Exception, e:
        logging.error(e)
        print traceback.format_exc()
        result.code = "1001"
        result.message = "unknow Exception"
    resp = Response(result.get_json())
    return resp
def log_project_issue(self, filepath, filename, problem="", detail="", desc=None, opens_with=None):
    cursor = self.conn.cursor()
    matches = re.search(u'(\.[^\.]+)$', filename)
    file_xtn = ""
    if matches is not None:
        file_xtn = str(matches.group(1))
    else:
        raise ArgumentError("Filename %s does not appear to have a file extension" % filename)
    typenum = self.project_type_for_extension(file_xtn, desc=desc, opens_with=opens_with)
    try:
        cursor.execute("""insert into edit_projects (filename,filepath,type,problem,problem_detail,lastseen,valid)
                          values (%s,%s,%s,%s,%s,now(),false) returning id""",
                       (filename, filepath, typenum, problem, detail))
    except psycopg2.IntegrityError as e:
        print str(e)
        print traceback.format_exc()
        self.conn.rollback()
        cursor.execute("""update edit_projects set lastseen=now(), valid=false, problem=%s, problem_detail=%s
                          where filename=%s and filepath=%s returning id""",
                       (problem, detail, filename, filepath))
        #print cursor.mogrify("""update edit_projects set lastseen=now(), valid=false, problem=%s, problem_detail=%s where filename=%s and filepath=%s returning id""", (problem,detail,filename,filepath))
    result = cursor.fetchone()
    id = result[0]
    self.conn.commit()
    return id
def distribute_occupancy_status(self, botengine, status, reason, last_status, last_reason):
    """
    Distribute the newest occupancy status from AI occupancy algorithms
    :param botengine: BotEngine environment
    :param status: Current status
    :param reason: Current reason
    :param last_status: Last status
    :param last_reason: Last reason
    :return:
    """
    for intelligence_id in self.intelligence_modules:
        try:
            self.intelligence_modules[intelligence_id].occupancy_status_updated(botengine, status, reason, last_status, last_reason)
        except Exception as e:
            botengine.get_logger().warn("location.py - Error delivering occupancy_status_updated to location microservice (continuing execution): " + str(e))
            import traceback
            botengine.get_logger().error(traceback.format_exc())

    # Device intelligence modules
    for device_id in self.devices:
        if hasattr(self.devices[device_id], "intelligence_modules"):
            for intelligence_id in self.devices[device_id].intelligence_modules:
                try:
                    self.devices[device_id].intelligence_modules[intelligence_id].occupancy_status_updated(botengine, status, reason, last_status, last_reason)
                except Exception as e:
                    botengine.get_logger().warn("location.py - Error delivering occupancy_status_updated message to device microservice (continuing execution): " + str(e))
                    import traceback
                    botengine.get_logger().error(traceback.format_exc())
def _listenEvent(self):
    currentEvent = self._newEvent()
    self._loop = True
    btnclick = False
    try:
        while self._loop == True:
            r, w, x = select([self._dev], [], [])
            for event in self._dev.read():
                if event.type == 3:
                    if event.code == 0:
                        currentEvent['x'] = event.value
                    elif event.code == 1:
                        currentEvent['y'] = event.value
                    elif event.code == 24:
                        currentEvent['pression'] = event.value
                elif event.type == 1:
                    if event.code == 330:
                        btnclick = True
            complete = btnclick
            for (key, ev) in currentEvent.items():
                if ev == -1:
                    complete = False
            if complete:
                (currentEvent['x'], currentEvent['y']) = self.calculateCoord(currentEvent['x'], currentEvent['y'])
                if self._blocking:
                    return currentEvent
                self._callback(self, currentEvent)
                currentEvent = self._newEvent()
    except:
        print traceback.format_exc()
        return
def handle(self):
    while True:
        try:
            data = self.request.recv(2 * 1024 * 1024)
            if not data:
                break  # end
            data = json.loads(data)
            res = self.execute(data)
            logger.debug('instance count:' + str(len(self.rpc_instances.keys())))
            res = json.dumps(res)
            res = str(len(res)).rjust(8, '0') + res
            self.request.send(res)
        except socket.timeout as err:
            res = ('error in RequestHandler :%s, res:%s' % (traceback.format_exc(), data))
            logger.debug(res)
            res = json.dumps({'err': 'sys.socket.error', 'msg': format(err)})
            res = str(len(res)).rjust(8, '0') + res
            self.request.send(res)
            self.request.close()
            break
        except Exception as err:
            res = ('error in RequestHandler :%s, res:%s' % (traceback.format_exc(), data))
            logger.debug(res)
            res = json.dumps({'err': 'sys.socket.error', 'msg': format(err)})
            res = str(len(res)).rjust(8, '0') + res
            self.request.send(res)
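# A minimal client-side sketch (an assumption, not code from the original
# server) showing how a caller could consume the framing used by handle():
# every JSON payload is prefixed with its length as an 8-character,
# zero-padded decimal string.
import json

def read_response(sock):
    header = sock.recv(8)  # 8-digit, zero-padded length prefix
    length = int(header)
    chunks, remaining = [], length
    while remaining > 0:
        chunk = sock.recv(min(remaining, 64 * 1024))
        if not chunk:
            break
        chunks.append(chunk)
        remaining -= len(chunk)
    return json.loads(b''.join(chunks))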
def datastream_updated(self, botengine, address, content):
    """
    Data Stream Updated
    :param botengine: BotEngine environment
    :param address: Data Stream address
    :param content: Data Stream content
    """
    for intelligence_id in self.intelligence_modules:
        try:
            self.intelligence_modules[intelligence_id].datastream_updated(botengine, address, content)
        except Exception as e:
            botengine.get_logger().warn("location.py - Error delivering datastream message to location microservice (continuing execution): " + str(e))
            import traceback
            botengine.get_logger().error(traceback.format_exc())

    # Device intelligence modules
    for device_id in self.devices:
        if hasattr(self.devices[device_id], "intelligence_modules"):
            for intelligence_id in self.devices[device_id].intelligence_modules:
                try:
                    self.devices[device_id].intelligence_modules[intelligence_id].datastream_updated(botengine, address, content)
                except Exception as e:
                    botengine.get_logger().warn("location.py - Error delivering datastream message to device microservice (continuing execution): " + str(e))
                    import traceback
                    botengine.get_logger().error(traceback.format_exc())
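# A minimal sketch (not from the original codebase) of the interface the two
# dispatchers above assume: an intelligence module only needs to expose the
# matching callback methods, and any exception it raises is caught and logged
# so the remaining modules keep running. The class name and logging calls are
# illustrative; only the method signatures mirror the calls above.
class ExampleIntelligenceModule:

    def datastream_updated(self, botengine, address, content):
        # React to a data stream message routed to this module.
        botengine.get_logger().info("datastream %s => %s" % (address, content))

    def occupancy_status_updated(self, botengine, status, reason, last_status, last_reason):
        # React to an occupancy status change.
        botengine.get_logger().info("occupancy %s -> %s (%s)" % (last_status, status, reason))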
def init(self, group_metadata):
    """Launch the browser that is being tested, and the TestRunner process
    that will run the tests."""
    # It seems that this lock is helpful to prevent some race that otherwise
    # sometimes stops the spawned processes initialising correctly, and
    # leaves this thread hung
    if self.init_timer is not None:
        self.init_timer.cancel()

    self.logger.debug("Init called, starting browser and runner")

    if not self.no_timeout:
        self.init_timer = threading.Timer(self.browser.init_timeout, self.init_timeout)
    try:
        if self.init_timer is not None:
            self.init_timer.start()
        self.logger.debug("Starting browser with settings %r" % self.browser_settings)
        self.browser.start(group_metadata=group_metadata, **self.browser_settings)
        self.browser_pid = self.browser.pid()
    except Exception:
        self.logger.warning("Failure during init %s" % traceback.format_exc())
        if self.init_timer is not None:
            self.init_timer.cancel()
        self.logger.error(traceback.format_exc())
        succeeded = False
    else:
        succeeded = True
        self.started = True

    return succeeded
def _execute_jobs(self):
    logger.info('Jobs execution started')
    try:
        logger.debug('Lock acquiring')
        with sync_manager.lock(self.JOBS_LOCK, blocking=False):
            logger.debug('Lock acquired')
            ready_jobs = self._ready_jobs()
            for job in ready_jobs:
                try:
                    self.__process_job(job)
                    job.save()
                except LockError:
                    pass
                except Exception as e:
                    logger.error('Failed to process job {0}: '
                                 '{1}\n{2}'.format(job.id, e, traceback.format_exc()))
                    continue
    except LockFailedError as e:
        pass
    except Exception as e:
        logger.error('Failed to process existing jobs: {0}\n{1}'.format(
            e, traceback.format_exc()))
    finally:
        logger.info('Jobs execution finished')
        self.__tq.add_task_at(self.JOBS_EXECUTE,
                              self.jobs_timer.next(),
                              self._execute_jobs)
def buildDicts(n):
    cleaner = Cleaner()
    cleaner.javascript = True
    cleaner.style = True
    i = 0
    tagsDict = set()
    while (i < n):
        if (os.path.isfile("spam/%d.txt" % i)):
            try:
                readInFile = open("spam/%d.txt" % i)
                content = readInFile.read()
                noSymbols = re.sub('[^A-Za-z-]+', ' ', content.lower())  # noSymbols is stripped of symbols
                tags = set(noSymbols.split())  # allCopy is the set of words without symbols
                tagsDict = tagsDict.union(tags)
            except Exception, err:
                print traceback.format_exc()
                print sys.exc_info()[0]
        if (os.path.isfile("notspam/%d.txt" % i)):
            try:
                readInFile = open("notspam/%d.txt" % i)
                content = readInFile.read()
                noSymbols = re.sub('[^A-Za-z-]+', ' ', content.lower())  # noSymbols is stripped of symbols
                tags = set(noSymbols.split())  # allCopy is the set of words without symbols
                tagsDict = tagsDict.union(tags)
            except Exception, err:
                print traceback.format_exc()
                print sys.exc_info()[0]
        i = i + 1
    return tagsDict
def tokenize(n, tagsDict):
    cleaner = Cleaner()
    cleaner.javascript = True
    cleaner.style = True
    i = 0
    df = pandas.DataFrame(columns=[list(tagsDict)])
    while (i < n):
        allVector = {}
        if (os.path.isfile("spam/%d.txt" % i)):
            try:
                for word in tagsDict:
                    allVector[word] = 0
                readInFile = open("spam/%d.txt" % i)
                content = readInFile.read()
                noSymbols = re.sub('[^A-Za-z-]+', ' ', content.lower())  # noSymbols is stripped of symbols
                allCopy = noSymbols.split()  # allCopy is the set of words without symbols
                for tag in allCopy:
                    df.ix[i][tag] = df.ix[i][tag] + 1
                df.ix[i]['isSpam'] = 'spam'
            except Exception, err:
                print traceback.format_exc()
                print sys.exc_info()[0]
        i = i + 1
def log(user_id, currentMessage):
    try:
        K = list()
        logExists = os.path.exists('../log/' + str(user_id * -1) + 'log.csv')
        fieldnames = ['name', 'text']
        if logExists:
            try:
                with open('../log/' + str(user_id * -1) + 'log.csv', 'r+') as csvfile:
                    reader = csv.DictReader(csvfile)
                    K = list(reader)
                with open('../log/' + str(user_id * -1) + 'log.csv', 'w+') as csvfile:
                    writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
                    writer.writeheader()
                    for x in K:
                        writer.writerow(x)
                    writer.writerow({'name': currentMessage.from_user.first_name, 'text': currentMessage.text})
            except Exception:
                pass
        else:
            file('../log/' + str(user_id * -1) + 'log.csv', 'a').close()
            with open('../log/' + str(user_id * -1) + 'log.csv', 'w+') as csvfile:
                writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
                writer.writeheader()
                writer.writerow({'name': currentMessage.from_user.first_name, 'text': currentMessage.text})
    except Exception:
        traceback.format_exc()
def _RegisterDebuggee(self, service):
    """Single attempt to register the debuggee.

    If the registration succeeds, sets self._debuggee_id to the registered
    debuggee ID.

    Args:
      service: client to use for API calls

    Returns:
      (registration_required, delay) tuple
    """
    try:
        request = {'debuggee': self._GetDebuggee()}
        try:
            response = service.debuggees().register(body=request).execute()
            self._debuggee_id = response['debuggee']['id']
            native.LogInfo('Debuggee registered successfully, ID: %s' % (
                self._debuggee_id))
            self.register_backoff.Succeeded()
            return (False, 0)  # Proceed immediately to list active breakpoints.
        except BaseException:
            native.LogInfo('Failed to register debuggee: %s, %s' %
                           (request, traceback.format_exc()))
    except BaseException:
        native.LogWarning('Debuggee information not available: ' +
                          traceback.format_exc())
    return (True, self.register_backoff.Failed())
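# Hedged sketch of the polling loop implied by the (registration_required,
# delay) return value documented above; the 'agent', 'service', and function
# names here are illustrative, not part of the original module.
import time

def register_until_done(agent, service):
    while True:
        registration_required, delay = agent._RegisterDebuggee(service)
        if not registration_required:
            break  # registered; proceed to list active breakpoints
        time.sleep(delay)  # back off before retrying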
def main():
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option('-c', type='int', dest='concurrency', default=DEFAULT_CONCURRENCY,
                      help='Number of multiple requests to make')
    parser.add_option('-s', type='int', dest='seconds', default=DEFAULT_SECONDS,
                      help='Number of seconds to perform')
    parser.add_option("--mode", dest="mode", default=None)
    options, args = parser.parse_args()

    # run tests
    if 'test' == options.mode:
        test_parse_http_load_result()
        return

    if 1 != len(args):
        parser.print_help()
        return

    assert 1 <= options.concurrency <= 100
    assert 1 <= options.seconds <= 100

    # run bench
    old_sys_stderr = sys.stderr
    sys.stderr = StringIO()
    try:
        run_bench(args[0], options.seconds, options.concurrency)
    except Exception, e:
        print e
        import traceback
        print traceback.format_exc()
def _run(self):
    try:
        start_time = time.time()
        comment = ''
        num_ok = 0
        num_err = 0
        num_post = 0
        loop = 0
        while True:
            if loop % 2:
                if self.get_page(comment):
                    num_ok += 1
                else:
                    num_err += 1
                # check elapsed
                elapsed = time.time() - start_time
                if elapsed > self._seconds:
                    break
            else:
                comment = self.post_comment()
                num_post += 1
            loop += 1
        score = float(num_post) / elapsed * 10
        percent = float(num_ok) / float(num_ok + num_err)
        value = (score, percent, num_post)
    except Exception, e:
        import traceback
        print traceback.format_exc()
        value = e
def sendEmail(body):
    global EMAIL_USER, EMAIL_PASSWORD
    try:
        smtp_host = 'smtp.gmail.com'
        smtp_port = 587
        server = smtplib.SMTP()
        server.connect(smtp_host, smtp_port)
        server.ehlo()
        server.starttls()
        server.login(EMAIL_USER, EMAIL_PASSWORD)
        fromaddr = "CS109 Server"
        tolist = ["*****@*****.**"]
        sub = "CS109 Notification"
        msg = email.MIMEMultipart.MIMEMultipart()
        msg['From'] = fromaddr
        msg['To'] = email.Utils.COMMASPACE.join(tolist)
        msg['Subject'] = sub
        msg.attach(MIMEText(body))
        msg.attach(MIMEText('\nLove, cs109.inight.in', 'plain'))
        server.sendmail(EMAIL_USER, tolist, msg.as_string())
    except Exception as e:
        print traceback.format_exc()
        error = "Email failed to send (error %d): %s" % (e.args[0], e.args[1])
        print error
        MYSQL_ERROR = error
def handle_request(self, c):
    funcname = result = request = None
    try:
        connection.deliver_challenge(c, self.authkey)
        connection.answer_challenge(c, self.authkey)
        request = c.recv()
        ignore, funcname, args, kwds = request
        assert funcname in self.public, '%r unrecognized' % funcname
        func = getattr(self, funcname)
    except Exception:
        msg = ('#TRACEBACK', format_exc())
    else:
        try:
            result = func(c, *args, **kwds)
        except Exception:
            msg = ('#TRACEBACK', format_exc())
        else:
            msg = ('#RETURN', result)
    try:
        c.send(msg)
    except Exception as e:
        try:
            c.send(('#TRACEBACK', format_exc()))
        except Exception:
            pass
        util.info('Failure to send message: %r', msg)
        util.info(' ... request was %r', request)
        util.info(' ... exception was %r', e)
    c.close()
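# A minimal client-side sketch (illustrative, not part of the original module)
# of how a peer could talk to handle_request() above. Client(..., authkey=...)
# runs answer_challenge/deliver_challenge, the counterpart of the server's
# deliver_challenge/answer_challenge handshake, and the request tuple mirrors
# the (ignore, funcname, args, kwds) unpacking done by the server.
from multiprocessing.connection import Client

def call_remote(address, authkey, funcname, *args, **kwds):
    c = Client(address, authkey=authkey)
    try:
        c.send((None, funcname, args, kwds))
        kind, result = c.recv()
    finally:
        c.close()
    if kind == '#RETURN':
        return result
    raise RuntimeError(result)  # '#TRACEBACK' carries the remote traceback text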
def run_task(self, **kwargs):
    Session.merge(self.task)
    self.task.start_time = datetime.utcnow()
    self.task.ident = threading.get_ident()
    self.task.status = TaskStatus.running.value
    Session.merge(self.task)
    Session.commit()
    try:
        self.run_function(**kwargs)
        self.task.log = self.log.messages
        self.task.end_time = datetime.utcnow()
        self.task.status = TaskStatus.finished.value
        self.task.result = TaskResult.success.value
        self.task = Session.merge(self.task)
        Session.commit()
    except Exception as e:
        self.task.log = self.log.messages
        self.task.tb = traceback.format_exc()
        self.task.end_time = datetime.utcnow()
        self.task.status = TaskStatus.finished.value
        self.task.result = TaskResult.fail.value
        self.task = Session.merge(self.task)
        Session.commit()
        defect = jira.defect_for_exception(
            "Background Task Error: {}".format(self.task.name),
            e,
            tb=traceback.format_exc(),
            username=self.task.username)
        self.task.defect_ticket = defect.key
        self.task = Session.merge(self.task)
        Session.commit()
    finally:
        Session.remove()
def query(query, *args):
    global MYSQL_HOST, MYSQL_USER, MYSQL_PASSWORD, MYSQL_DB, MYSQL_ERROR, LAST_INSERT_ID
    result = None
    try:
        con = mdb.connect(MYSQL_HOST, MYSQL_USER, MYSQL_PASSWORD, MYSQL_DB)
        # handle unicode problems client side
        con.set_character_set('utf8')
        cur = con.cursor()
        # handle unicode errors server side
        cur.execute('SET NAMES utf8;')
        cur.execute('SET CHARACTER SET utf8;')
        cur.execute('SET character_set_connection=utf8;')
        cur.execute(query, args)
        con.commit()
        LAST_INSERT_ID = cur.lastrowid
        result = cur.fetchall()
    except Exception as e:
        print traceback.format_exc()
        error = "MySQL Error %d: %s" % (e.args[0], e.args[1])
        print error
        MYSQL_ERROR = error
    finally:
        con.close()
    return result
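# Hedged usage sketch for query(): the table and column names here are made up
# for illustration. Arguments are passed separately so the MySQL driver does
# the escaping, rather than formatting values into the SQL text.
rows = query("SELECT id, name FROM users WHERE email = %s", "user@example.com")
query("INSERT INTO notes (user_id, body) VALUES (%s, %s)", 42, "hello")
new_note_id = LAST_INSERT_ID  # set by query() after the INSERT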
def exec_scenario_tests(self, scenario, testcase, cmd_func):
    err_str = ""
    with cmd_func(**self.dsc_kws) as dsc:
        try:
            # setup.
            if SCE_SETUP in scenario:
                self.exec_tests(dsc, scenario[SCE_SETUP])
            # tests.
            self.exec_tests(dsc, testcase[SCE_TEST])
        except DataStoreTestError as e:
            # ERROR
            err_str = "%s" % (e)
        except (socket.timeout, ShellCmdTimeOut):
            # sock or shell timeout.
            err_str = "%s" % (traceback.format_exc())
        finally:
            try:
                # teardown.
                if SCE_TEARDOWN in scenario:
                    self.exec_tests(dsc, scenario[SCE_TEARDOWN])
            except (DataStoreTestError, socket.timeout, ShellCmdTimeOut):
                # ERROR
                if err_str == "":
                    err_str = "%s" % (traceback.format_exc())
    return err_str
def new_f(request, *args, **kwargs):
    try:
        return f(request, *args, **kwargs)
    except self.Exception, ex:
        if request.ResponseType == 'json':
            if self.showStackTrace:
                return '''{message:"%s", stackTrace:"%s"}''' % (str(ex), traceback.format_exc())
            else:
                return '''{message:%s}''' % (self.message)
        elif request.ResponseType == 'xml':
            if self.showStackTrace:
                return '''<root><message>%s</message><stackTrace>%s</stackTrace></root>"''' % (str(ex), traceback.format_exc())
            else:
                return '''<root><message>%s</message></root>''' % (self.message)
        else:
            if request.status == None:
                request.status = self.message or ''
            else:
                request.status += self.message or ''
            logging.error(str(ex) + '\n' + traceback.format_exc())
            if self.showStackTrace or DEBUG:
                request.status += " Details:<br/>" + ex.__str__() + '</br>' + traceback.format_exc()
            else:
                'There has been some problem, and the moderator was informed about it.'
            request.redirect(self.redirectUrl)
def _shutdown(self, status_result):
    runner_status = self._runner.status

    try:
        propagate_deadline(self._chained_checker.stop, timeout=self.STOP_TIMEOUT)
    except Timeout:
        log.error('Failed to stop all checkers within deadline.')
    except Exception:
        log.error('Failed to stop health checkers:')
        log.error(traceback.format_exc())

    try:
        propagate_deadline(self._runner.stop, timeout=self.STOP_TIMEOUT)
    except Timeout:
        log.error('Failed to stop runner within deadline.')
    except Exception:
        log.error('Failed to stop runner:')
        log.error(traceback.format_exc())

    # If the runner was alive when _shutdown was called, defer to the status_result,
    # otherwise the runner's terminal state is the preferred state.
    exit_status = runner_status or status_result

    self.send_update(
        self._driver, self._task_id, exit_status.status, status_result.reason)

    self.terminated.set()
    defer(self._driver.stop, delay=self.PERSISTENCE_WAIT)
def post(self, request):
    try:
        params = request.POST
        action = params.get("action", "deploy")
        stage = params.get("current_stage")
        ngapp2_deploy_utils = Ngapp2DeployUtils()
        if action == "cancel":
            if stage == "deploy_to_canary":
                ngapp2_deploy_utils.rollback_canary()
            elif stage == "deploy_to_prod":
                ngapp2_deploy_utils.rollback_prod()
            ngapp2_deploy_utils.set_status_to_zk("SERVING_BUILD")
        else:
            if stage == 'pre_deploy':
                self.start_pre_deploy(request)
            elif stage == 'post_deploy':
                self.start_post_deploy(request)
            elif stage == 'rollback':
                self.start_roll_back(request)
            ngapp2_deploy_utils.set_status_to_zk(stage)
    except:
        logger.error(traceback.format_exc())
        logger.warning(traceback.format_exc())
    finally:
        return redirect("/ngapp2/deploy/")
def nextMonthDay(cnt, now, start_time, config):
    months = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12"]
    days = config['day'].split(",")
    next = None
    try:
        for month in months:
            if cnt == 0 and int(month) < int(start_time.month):  # same year
                continue
            for day in days:
                if cnt == 0 and int(month) == int(start_time.month) and int(day) < int(start_time.day):
                    continue
                try:
                    next = datetime.datetime(start_time.year + cnt, int(month), int(day),
                                             start_time.hour, start_time.minute, 0)
                    cronlog("update monthly::(name, next, previous, current) = (%s, %s, %s, %s)"
                            % (config['name'], str(next), str(start_time), str(now)))
                    if next >= start_time:
                        if next > now:
                            return next
                        else:
                            next = None
                except:
                    pass
    except:
        cronlog("update monthly::exception = %s" % (traceback.format_exc()))
        print traceback.format_exc()
    return next
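# Hedged usage sketch: the config dict only needs the 'day' and 'name' keys
# read above, and the values here are illustrative. cnt selects the year
# offset from start_time, so a caller can retry with the next year when no
# date in the current year qualifies.
import datetime

config = {'name': 'monthly-report', 'day': '1,15'}
now = datetime.datetime.now()
start_time = now
candidate = nextMonthDay(0, now, start_time, config) or \
    nextMonthDay(1, now, start_time, config)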
def _run_job(cls, job_id, additional_job_params):
    """Starts the job."""
    logging.info(
        'Job %s started at %s' %
        (job_id, utils.get_current_time_in_millisecs()))
    cls.register_start(job_id)

    try:
        result = cls._run(additional_job_params)
    except Exception as e:
        logging.error(traceback.format_exc())
        logging.error(
            'Job %s failed at %s' %
            (job_id, utils.get_current_time_in_millisecs()))
        cls.register_failure(
            job_id, '%s\n%s' % (unicode(e), traceback.format_exc()))
        raise taskqueue_services.PermanentTaskFailure(
            'Task failed: %s\n%s' % (unicode(e), traceback.format_exc()))

    # Note that the job may have been canceled after it started and before
    # it reached this stage. This will result in an exception when the
    # validity of the status code transition is checked.
    cls.register_completion(job_id, result)
    logging.info(
        'Job %s completed at %s' %
        (job_id, utils.get_current_time_in_millisecs()))
def display():
    global timer, alert, fps
    try:
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        begin = time.time()
        glUseProgram(shader)
        nextAnimation()
        red = [1.0, 0., 0., 1.]
        green = [0., 1.0, 0., 1.]
        blue = [0., 0., 1.0, 1.]
        glMaterialfv(GL_FRONT_AND_BACK, GL_DIFFUSE, blue)
        drawSkyBox((N - 32) / 2, (32 - N) / 2, 32, 16)
        glMaterialfv(GL_FRONT_AND_BACK, GL_DIFFUSE, red)
        drawPlane(64, 64)
        glMaterialfv(GL_FRONT_AND_BACK, GL_DIFFUSE, green)
        drawWalls()
        glutSwapBuffers()
        elapsed = time.time() - begin
        fps = 1. / elapsed
    except Exception as e:
        print 'error occurs:', e
        print traceback.format_exc()
        glutLeaveMainLoop()
        return
def xonshrc_context(rcfiles=None, execer=None, initial=None):
    """Attempts to read in xonshrc file, and return the contents."""
    loaded = builtins.__xonsh_env__["LOADED_RC_FILES"] = []
    if initial is None:
        env = {}
    else:
        env = initial
    if rcfiles is None or execer is None:
        return env
    env["XONSHRC"] = tuple(rcfiles)
    for rcfile in rcfiles:
        if not os.path.isfile(rcfile):
            loaded.append(False)
            continue
        try:
            run_script_with_cache(rcfile, execer, env)
            loaded.append(True)
        except SyntaxError as err:
            loaded.append(False)
            exc = traceback.format_exc()
            msg = "{0}\nsyntax error in xonsh run control file {1!r}: {2!s}"
            warnings.warn(msg.format(exc, rcfile, err), RuntimeWarning)
            continue
        except Exception as err:
            loaded.append(False)
            exc = traceback.format_exc()
            msg = "{0}\nerror running xonsh run control file {1!r}: {2!s}"
            warnings.warn(msg.format(exc, rcfile, err), RuntimeWarning)
            continue
    return env
def GetDockerList(self):
    try:
        self.conn.request("GET", "/containers/json?all=1")
        r1 = self.conn.getresponse()
    except Exception, e:
        print traceback.format_exc()
        exit()
                            and ((non_marked and tr.find('img', src=rc['filter']))
                                 or (not non_marked and not tr.find('img', src=rc['filter'])))):
                        continue

                    try:
                        head = head if None is not head else self._header_row(tr)
                        seeders, leechers, size = [tryInt(n, n) for n in [
                            cells[head[x]].get_text().strip() for x in 'seed', 'leech', 'size']]
                        if not tr.find('a', href=rc['cats']) or self._reject_item(seeders, leechers):
                            continue
                        title = tr.find('a', href=rc['info']).get_text().strip()
                        download_url = self._link(tr.find('a', href=rc['get'])['href'])
                    except (AttributeError, TypeError, ValueError, IndexError):
                        continue

                    if title and download_url:
                        items[mode].append((title, download_url, seeders, self._bytesizer(size)))

            except generic.HaltParseException:
                pass
            except (BaseException, Exception):
                logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR)

            self._log_search(mode, len(items[mode]) - cnt, log + search_url)

        results = self._sort_seeding(mode, results + items[mode])

        return results


provider = HDTorrentsProvider()
def handle_(self, event):
    if event.get_event_code() == win32.EXCEPTION_DEBUG_EVENT and event.is_last_chance():
        self.timer.cancel()
        self.timer = False
        crash = Crash(event)
        crash.fetch_extra_data(event, takeMemorySnapshot=0)
        unique = crash.signature[3]
        if not self.isMinimize:
            if unique in self.unique_list:
                print "[-] Duplicate Crash"
                self.quit_(event.get_process())
            else:
                self.crashResult = True
                self.crash_count += 1
                self.unique_list.append(unique)
                try:
                    with open('unique.txt', 'w') as f:
                        f.write('\n'.join(self.unique_list))
                except:
                    import traceback
                    print traceback.format_exc()
                    pass
                try:
                    report = crash.fullReport()
                    prog = self.program.split("\\")[-1]
                    with open(self.crashDir + prog + '_' + datetime.now().strftime("%y-%m-%d-%H-%M-%S") + ".log", "w") as f:
                        f.write(report)
                    self.resultFile = self.crashDir + prog + '_' \
                        + datetime.now().strftime("%y-%m-%d-%H-%M-%S") + '_' + self.target_file.split("\\")[-1]
                    shutil.copy(self.target_file, self.resultFile)
                except:
                    import traceback
                    print traceback.format_exc()
                    pass
                try:
                    event.get_process().kill()
                except:
                    subprocess.call("taskkill /f /im %s" % self.program.split("\\")[-1])
        else:
            # minimize
            if unique == self.unique_list[-1]:
                # minimize success
                self.crashResult = True
                try:
                    prog = self.program.split("\\")[-1]
                    shutil.copy(self.target_file, self.resultFile + ".minimize")
                except:
                    import traceback
                    print traceback.format_exc()
                    pass
                try:
                    event.get_process().kill()
                except:
                    subprocess.call("taskkill /f /im %s" % self.program.split("\\")[-1])
    else:
        # not crash
        if not self.timer:
            self.timer = threading.Timer(5, self.quit_, [event.get_process()])
            self.timer.start()
def reset(self): """Reset the environment. This method is performed in between rollouts. It resets the state of the environment, and re-initializes the vehicles in their starting positions. If "vehicle_arrangement_shuffle" is set to True in env_params, the vehicles swap initial positions with one another. Also, if a "starting_position_shuffle" is set to True, the initial position of vehicles are redone. If "warmup_steps" is set to a value greater than 0, then this method also runs the necessary number of warmup steps before beginning training, with actions to the agents being assigned by the simulator. Returns ------- observation: numpy ndarray the initial observation of the space. The initial reward is assumed to be zero. """ # reset the time counter self.time_counter = 0 # warn about not using restart_instance when using inflows if len(self.scenario.net_params.inflows.get()) > 0 and \ not self.sumo_params.restart_instance: print( "**********************************************************\n" "**********************************************************\n" "**********************************************************\n" "WARNING: Inflows will cause computational performance to\n" "significantly decrease after large number of rollouts. In \n" "order to avoid this, set SumoParams(restart_instance=True).\n" "**********************************************************\n" "**********************************************************\n" "**********************************************************" ) if self.sumo_params.restart_instance or self.step_counter > 2e6: self.step_counter = 0 # issue a random seed to induce randomness into the next rollout self.sumo_params.seed = random.randint(0, 1e5) # modify the vehicles class to match initial data self.vehicles = deepcopy(self.initial_vehicles) # restart the sumo instance self.restart_sumo(self.sumo_params) # perform shuffling (if requested) if self.starting_position_shuffle or self.vehicle_arrangement_shuffle: if self.starting_position_shuffle: x0 = np.random.uniform(0, self.scenario.length) else: x0 = self.scenario.initial_config.x0 veh_ids = deepcopy(self.initial_ids) if self.vehicle_arrangement_shuffle: random.shuffle(veh_ids) initial_positions, initial_lanes = \ self.scenario.generate_starting_positions( num_vehicles=len(self.initial_ids), x0=x0) initial_state = dict() for i, veh_id in enumerate(veh_ids): route_id = "route" + initial_positions[i][0] # replace initial routes, lanes, and positions to reflect # new values list_initial_state = list(self.initial_state[veh_id]) list_initial_state[1] = route_id list_initial_state[2] = initial_lanes[i] list_initial_state[3] = initial_positions[i][1] initial_state[veh_id] = tuple(list_initial_state) # replace initial positions in initial observations self.initial_observations[veh_id]["edge"] = \ initial_positions[i][0] self.initial_observations[veh_id]["position"] = \ initial_positions[i][1] self.initial_state = deepcopy(initial_state) # # clear all vehicles from the network and the vehicles class for veh_id in self.traci_connection.vehicle.getIDList(): try: self.traci_connection.vehicle.remove(veh_id) self.traci_connection.vehicle.unsubscribe(veh_id) self.vehicles.remove(veh_id) except (FatalTraCIError, TraCIException): print("Error during start: {}".format(traceback.format_exc())) pass # clear all vehicles from the network and the vehicles class # FIXME (ev, ak) this is weird and shouldn't be necessary for veh_id in list(self.vehicles.get_ids()): self.vehicles.remove(veh_id) try: 
self.traci_connection.vehicle.remove(veh_id) self.traci_connection.vehicle.unsubscribe(veh_id) except (FatalTraCIError, TraCIException): print("Error during start: {}".format(traceback.format_exc())) # reintroduce the initial vehicles to the network for veh_id in self.initial_ids: type_id, route_id, lane_index, lane_pos, speed, pos = \ self.initial_state[veh_id] try: self.traci_connection.vehicle.addFull( veh_id, route_id, typeID=str(type_id), departLane=str(lane_index), departPos=str(lane_pos), departSpeed=str(speed)) except (FatalTraCIError, TraCIException): # if a vehicle was not removed in the first attempt, remove it # now and then reintroduce it self.traci_connection.vehicle.remove(veh_id) self.traci_connection.vehicle.addFull( veh_id, route_id, typeID=str(type_id), departLane=str(lane_index), departPos=str(lane_pos), departSpeed=str(speed)) self.traci_connection.simulationStep() # collect subscription information from sumo vehicle_obs = self.traci_connection.vehicle.getSubscriptionResults() id_lists = self.traci_connection.simulation.getSubscriptionResults() tls_obs = self.traci_connection.trafficlight.getSubscriptionResults() # store new observations in the vehicles and traffic lights class self.vehicles.update(vehicle_obs, id_lists, self) self.traffic_lights.update(tls_obs) # update the colors of vehicles self.update_vehicle_colors() self.prev_last_lc = dict() for veh_id in self.vehicles.get_ids(): # re-initialize the vehicles class with the states of the vehicles # at the start of a rollout self.vehicles.set_absolute_position(veh_id, self.get_x_by_id(veh_id)) # re-initialize memory on last lc self.prev_last_lc[veh_id] = -float("inf") # collect list of sorted vehicle ids self.sorted_ids, self.sorted_extra_data = self.sort_by_position() states = self.get_state() if isinstance(states, dict): self.state = {} observation = {} for key, state in states.items(): # collect information of the state of the network based on the # environment class used self.state[key] = np.asarray(state).T # collect observation new state associated with action observation[key] = np.copy(self.state[key]).tolist() else: # collect information of the state of the network based on the # environment class used self.state = np.asarray(states).T # observation associated with the reset (no warm-up steps) observation = np.copy(states) # perform (optional) warm-up steps before training for _ in range(self.env_params.warmup_steps): observation, _, _, _ = self.step(rl_actions=None) # render a frame self.render(reset=True) return observation
def start_sumo(self): """Start a sumo instance. Uses the configuration files created by the scenario class to initialize a sumo instance. Also initializes a traci connection to interface with sumo from Python. """ error = None for _ in range(RETRIES_ON_ERROR): try: # port number the sumo instance will be run on if self.sumo_params.port is not None: port = self.sumo_params.port else: # Don't do backoff when testing if os.environ.get("TEST_FLAG", 0): # backoff to decrease likelihood of race condition time_stamp = ''.join(str(time.time()).split('.')) # 1.0 for consistency w/ above time.sleep(1.0 * int(time_stamp[-6:]) / 1e6) port = sumolib.miscutils.getFreeSocketPort() sumo_binary = "sumo-gui" if self.sumo_params.render is True\ else "sumo" # command used to start sumo sumo_call = [ sumo_binary, "-c", self.scenario.cfg, "--remote-port", str(port), "--num-clients", str(self.sumo_params.num_clients), "--step-length", str(self.sim_step) ] # add step logs (if requested) if self.sumo_params.no_step_log: sumo_call.append("--no-step-log") # add the lateral resolution of the sublanes (if requested) if self.sumo_params.lateral_resolution is not None: sumo_call.append("--lateral-resolution") sumo_call.append(str(self.sumo_params.lateral_resolution)) # add the emission path to the sumo command (if requested) if self.sumo_params.emission_path is not None: ensure_dir(self.sumo_params.emission_path) emission_out = \ self.sumo_params.emission_path + \ "{0}-emission.xml".format(self.scenario.name) sumo_call.append("--emission-output") sumo_call.append(emission_out) else: emission_out = None if self.sumo_params.overtake_right: sumo_call.append("--lanechange.overtake-right") sumo_call.append("true") if self.sumo_params.ballistic: sumo_call.append("--step-method.ballistic") sumo_call.append("true") # specify a simulation seed (if requested) if self.sumo_params.seed is not None: sumo_call.append("--seed") sumo_call.append(str(self.sumo_params.seed)) if not self.sumo_params.print_warnings: sumo_call.append("--no-warnings") sumo_call.append("true") # set the time it takes for a gridlock teleport to occur sumo_call.append("--time-to-teleport") sumo_call.append(str(int(self.sumo_params.teleport_time))) logging.info(" Starting SUMO on port " + str(port)) logging.debug(" Cfg file: " + str(self.scenario.cfg)) if self.sumo_params.num_clients > 1: logging.info(" Num clients are" + str(self.sumo_params.num_clients)) logging.debug(" Emission file: " + str(emission_out)) logging.debug(" Step length: " + str(self.sim_step)) # Opening the I/O thread to SUMO self.sumo_proc = subprocess.Popen( sumo_call, preexec_fn=os.setsid) # wait a small period of time for the subprocess to activate # before trying to connect with traci if os.environ.get("TEST_FLAG", 0): time.sleep(0.1) else: time.sleep(config.SUMO_SLEEP) self.traci_connection = traci.connect(port, numRetries=100) self.traci_connection.setOrder(0) self.traci_connection.simulationStep() return except Exception as e: print("Error during start: {}".format(traceback.format_exc())) error = e self.teardown_sumo() raise error
def teardown_sumo(self):
    """Kill the sumo subprocess instance."""
    try:
        os.killpg(self.sumo_proc.pid, signal.SIGTERM)
    except Exception:
        print("Error during teardown: {}".format(traceback.format_exc()))
        for test_count, attachment in attachments.items():
            attachment = MIMEText(attachment)
            attachment.add_header("Content-Disposition", "attachment",
                                  filename="test_case_%d_console_output.txt" % test_count)
            msg.attach(attachment)

        send_email(msg)
    else:
        SUBJECT = "Successful %s" % SUBJECT
        msg = prepare_email("All test cases were successful\n\n%s" % end_string)
        send_email(msg)


if __name__ == "__main__":
    log_fd = open("/tmp/sqlmapregressiontest.log", "wb")
    log_fd.write("Regression test started at %s\n" % START_TIME)

    try:
        main()
    except Exception, e:
        log_fd.write("An exception has occurred:\n%s" % str(traceback.format_exc()))

    log_fd.write("Regression test finished at %s\n\n" % time.strftime("%H:%M:%S %d-%m-%Y", time.gmtime()))
    log_fd.close()
def fail_if_exception(self, msg):
    try:
        yield
    except Exception:  # pylint: disable=W0703
        trace = tb.format_exc()
        self.fail("{}\b{}".format(msg, trace))
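# Hedged usage sketch: fail_if_exception is a generator, so it is assumed to
# be wrapped with contextlib.contextmanager (the decorator is not shown
# above). Inside a unittest.TestCase method it turns any exception raised in
# the block into a test failure carrying the message plus the traceback. The
# test name and the load_configuration call are illustrative.
def test_configuration_loads(self):
    with self.fail_if_exception("failed to load configuration"):
        load_configuration("settings.yaml")  # hypothetical call under test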
def low(self, fun, low, print_event=True, full_return=False): ''' Execute a function from low data Low data includes: required: - fun: the name of the function to run optional: - arg: a list of args to pass to fun - kwarg: kwargs for fun - __user__: user who is running the command - __jid__: jid to run under - __tag__: tag to run under ''' # fire the mminion loading (if not already done) here # this is not to clutter the output with the module loading # if we have a high debug level. self.mminion # pylint: disable=W0104 jid = low.get('__jid__', salt.utils.jid.gen_jid(self.opts)) tag = low.get('__tag__', salt.utils.event.tagify(jid, prefix=self.tag_prefix)) data = { 'fun': '{0}.{1}'.format(self.client, fun), 'jid': jid, 'user': low.get('__user__', 'UNKNOWN'), } event = salt.utils.event.get_event('master', self.opts['sock_dir'], self.opts['transport'], opts=self.opts, listen=False) if print_event: print_func = self.print_async_event \ if hasattr(self, 'print_async_event') \ else None else: # Suppress printing of return event (this keeps us from printing # runner/wheel output during orchestration). print_func = None namespaced_event = salt.utils.event.NamespacedEvent( event, tag, print_func=print_func) # TODO: test that they exist # TODO: Other things to inject?? func_globals = { '__jid__': jid, '__user__': data['user'], '__tag__': tag, # weak ref to avoid the Exception in interpreter # teardown of event '__jid_event__': weakref.proxy(namespaced_event), } try: self_functions = pycopy.copy(self.functions) salt.utils.lazy.verify_fun(self_functions, fun) # Inject some useful globals to *all* the function's global # namespace only once per module-- not per func completed_funcs = [] for mod_name in six.iterkeys(self_functions): if '.' not in mod_name: continue mod, _ = mod_name.split('.', 1) if mod in completed_funcs: continue completed_funcs.append(mod) for global_key, value in six.iteritems(func_globals): self.functions[mod_name].__globals__[global_key] = value # There are some discrepancies of what a "low" structure is in the # publisher world it is a dict including stuff such as jid, fun, # arg (a list of args, with kwargs packed in). Historically this # particular one has had no "arg" and just has had all the kwargs # packed into the top level object. The plan is to move away from # that since the caller knows what is an arg vs a kwarg, but while # we make the transition we will load "kwargs" using format_call if # there are no kwargs in the low object passed in. if 'arg' in low and 'kwarg' in low: args = low['arg'] kwargs = low['kwarg'] else: f_call = salt.utils.args.format_call( self.functions[fun], low, expected_extra_kws=CLIENT_INTERNAL_KEYWORDS) args = f_call.get('args', ()) kwargs = f_call.get('kwargs', {}) # Update the event data with loaded args and kwargs data['fun_args'] = list(args) + ([kwargs] if kwargs else []) func_globals['__jid_event__'].fire_event(data, 'new') # Initialize a context for executing the method. 
with tornado.stack_context.StackContext( self.functions.context_dict.clone): func = self.functions[fun] try: data['return'] = func(*args, **kwargs) except TypeError as exc: data[ 'return'] = '\nPassed invalid arguments: {0}\n\nUsage:\n{1}'.format( exc, func.__doc__) try: data['success'] = self.context.get('retcode', 0) == 0 except AttributeError: # Assume a True result if no context attribute data['success'] = True if isinstance(data['return'], dict) and 'data' in data['return']: # some functions can return boolean values data['success'] = salt.utils.state.check_result( data['return']['data']) except (Exception, SystemExit) as ex: if isinstance(ex, salt.exceptions.NotImplemented): data['return'] = six.text_type(ex) else: data['return'] = 'Exception occurred in {0} {1}: {2}'.format( self.client, fun, traceback.format_exc(), ) data['success'] = False if self.store_job: try: salt.utils.job.store_job( self.opts, { 'id': self.opts['id'], 'tgt': self.opts['id'], 'jid': data['jid'], 'return': data, }, event=None, mminion=self.mminion, ) except salt.exceptions.SaltCacheError: log.error('Could not store job cache info. ' 'Job details for this run may be unavailable.') # Outputters _can_ mutate data so write to the job cache first! namespaced_event.fire_event(data, 'ret') # if we fired an event, make sure to delete the event object. # This will ensure that we call destroy, which will do the 0MQ linger log.info('Runner completed: %s', data['jid']) del event del namespaced_event return data if full_return else data['return']
def do_POST(self): """Handles the HTTP POST request. Attempts to interpret all HTTP POST requests as XML-RPC calls, which are forwarded to the server's _dispatch method for handling. """ # Check that the path is legal if not self.is_rpc_path_valid(): self.report_404() return try: # Get arguments by reading body of request. # We read this in chunks to avoid straining # socket.read(); around the 10 or 15Mb mark, some platforms # begin to have problems (bug #792570). max_chunk_size = 10 * 1024 * 1024 size_remaining = int(self.headers["content-length"]) L = [] while size_remaining: chunk_size = min(size_remaining, max_chunk_size) chunk = self.rfile.read(chunk_size) if not chunk: break L.append(chunk) size_remaining -= len(L[-1]) data = b''.join(L) data = self.decode_request_content(data) if data is None: return #response has been sent # In previous versions of SimpleXMLRPCServer, _dispatch # could be overridden in this class, instead of in # SimpleXMLRPCDispatcher. To maintain backwards compatibility, # check to see if a subclass implements _dispatch and dispatch # using that method if present. response = self.server._marshaled_dispatch( data, getattr(self, '_dispatch', None), self.path) except Exception as e: # This should only happen if the module is buggy # internal error, report as HTTP server error self.send_response(500) # Send information about the exception if requested if hasattr(self.server, '_send_traceback_header') and \ self.server._send_traceback_header: self.send_header("X-exception", str(e)) trace = traceback.format_exc() trace = str(trace.encode('ASCII', 'backslashreplace'), 'ASCII') self.send_header("X-traceback", trace) self.send_header("Content-length", "0") self.end_headers() else: self.send_response(200) self.send_header("Content-type", "text/xml") if self.encode_threshold is not None: if len(response) > self.encode_threshold: q = self.accept_encodings().get("gzip", 0) if q: try: response = gzip_encode(response) self.send_header("Content-Encoding", "gzip") except NotImplementedError: pass self.send_header("Content-length", str(len(response))) self.end_headers() self.wfile.write(response)
async def wrapper(check): if not LOGSPAMMER: send_to = check.chat_id else: send_to = BOTLOG_CHATID if not trigger_on_fwd and check.fwd_from: return if check.via_bot_id and not trigger_on_inline: return if groups_only and not check.is_group: await check.respond("`Bunun bir grup olduğunu sanmıyorum.`") return try: await func(check) except events.StopPropagation: raise events.StopPropagation except KeyboardInterrupt: pass except BaseException: if not disable_errors: date = strftime("%Y-%m-%d %H:%M:%S", gmtime()) text = "**USERBOT HATA RAPORU**\n" link = "[Asena Destek Grubu](https://t.me/AsenaSupport)" text += "İsterseniz, bunu rapor edebilirsiniz" text += f"- sadece bu mesajı buraya iletin {link}.\n" text += "Hata ve Tarih dışında hiçbir şey kaydedilmez\n" ftext = "========== UYARI ==========" ftext += "\nBu dosya sadece burada yüklendi," ftext += "\nsadece hata ve tarih kısmını kaydettik," ftext += "\ngizliliğinize saygı duyuyoruz," ftext += "\nburada herhangi bir gizli veri varsa" ftext += "\nbu hata raporu olmayabilir, kimse verilerinize ulaşamaz.\n" ftext += "================================\n\n" ftext += "--------USERBOT HATA GUNLUGU--------\n" ftext += "\nTarih: " + date ftext += "\nGrup ID: " + str(check.chat_id) ftext += "\nGönderen kişinin ID: " + str(check.sender_id) ftext += "\n\nOlay Tetikleyici:\n" ftext += str(check.text) ftext += "\n\nGeri izleme bilgisi:\n" ftext += str(format_exc()) ftext += "\n\nHata metni:\n" ftext += str(sys.exc_info()[1]) ftext += "\n\n--------USERBOT HATA GUNLUGU BITIS--------" command = "git log --pretty=format:\"%an: %s\" -10" ftext += "\n\n\nSon 10 commit:\n" process = await asyncsubshell(command, stdout=asyncsub.PIPE, stderr=asyncsub.PIPE) stdout, stderr = await process.communicate() result = str(stdout.decode().strip()) \ + str(stderr.decode().strip()) ftext += result file = open("error.log", "w+") file.write(ftext) file.close() if LOGSPAMMER: await check.client.respond( "`Üzgünüm, UserBot'um çöktü.\ \nHata günlükleri UserBot günlük grubunda saklanır.`") await check.client.send_file(send_to, "error.log", caption=text) remove("error.log") else: pass
def migration():
    try:
        ModelItem.migration()
    except Exception as e:
        logger.error('Exception:%s', e)
        logger.error(traceback.format_exc())
def parse_episode(self, response): try: log.msg('parse_episode %s' % response.request.url) thumb_url = response.request.meta['thumb'] cat_name = response.request.meta['cat_name'] audit = response.request.meta['audit'] priority = response.request.meta['priority'] items = [] #show_id show_id = Util.get_iqiyi_showid(response.request.url) #print "show_id: %s" % show_id #space maybe exist: "albumId:326754200" or "albumId: 326754200" albumid = response.selector.re(re.compile(r'albumId: ?(\d+)')) #video info title = response.xpath( '//div[@class="play-tit-l"]/h2/descendant-or-self::*/text()' ).extract() if not title: title = response.xpath( '//div[@class="play-tit-l"]/h1/descendant-or-self::*/text()' ).extract() if not title: title = response.xpath( '//div[@class="mod-play-t**s"]/h1/descendant-or-self::*/text()' ).extract() if not title: title = response.xpath( '//div[@class="play-tit play-tit-oneRow play-tit-long"]/h1/descendant-or-self::*/text()' ).extract() category = response.xpath( '//div[@class="crumb_bar"]/span[1]/span/a[2]/text()').extract( ) if not category: category = response.xpath( '//div[@class="play-album-crumbs textOverflow"]/span[1]/a[2]/text()' ).extract() if not category: category = response.xpath( '//div[@class="crumb_bar"]/span[1]/a[2]/text()').extract() if not category: category = response.xpath( '//div[@class="mod-crumb_bar"]/span[1]/a[2]/text()' ).extract() upload_time = response.xpath( '//div[@class="crumb_bar"]/span[3]/span/text()').extract() if not upload_time: upload_time = response.xpath( '//div[@class="crumb_bar"]/span[2]/span/text()').extract() tag = response.xpath( '//span[@id="widget-videotag"]/descendant::*/text()').extract( ) if not tag: tag = response.xpath( '//span[@class="mod-tags_item vl-block"]/descendant::*/text()' ).extract() if not tag: tag = response.xpath( '//div[@class="crumb_bar"]/span[2]/a/text()').extract() ep_item = EpisodeItem() if title: ep_item['title'] = "".join([t.strip() for t in title]) if show_id: ep_item['show_id'] = show_id if tag: ep_item['tag'] = "|".join([t.strip() for t in tag]) if upload_time: ep_item['upload_time'] = upload_time[0].strip() #if category: # ep_item['category'] = category[0].strip() if thumb_url: ep_item['thumb_url'] = thumb_url[0].strip() ep_item['spider_id'] = self.spider_id ep_item['site_id'] = self.site_id ep_item['url'] = response.request.url #ep_item['cat_id'] = cat_id ep_item['category'] = cat_name ep_item['format_id'] = '2' ep_item['audit'] = audit ep_item['priority'] = priority if albumid: items.append( Request(url=self.playlength_url + albumid[0], callback=self.parse_playlength, meta={ 'item': ep_item, 'albumid': albumid[0] })) else: items.append(ep_item) return items except Exception as e: log.msg(traceback.format_exc(), level=log.ERROR)
def log(self, data=None):
    message = "\n".join([
        "Data",
        json.dumps(data, default=str, indent=4),
        "Exception",
        traceback.format_exc(),
    ])
    return frappe.log_error(title="Tally Migration Error", message=message)
def plugin_unload():
    try:
        logger.debug('%s plugin_unload', package_name)
    except Exception as e:
        logger.error('Exception:%s', e)
        logger.error(traceback.format_exc())
def render_post(self, request, components, msg): """ Forward a signed message through the gossip network. """ data = request.content.getvalue() msg = self._get_message(request) # if it is an error response message, returns it immediately if isinstance(msg, dict) and 'status' in msg: return msg if self.validator.config.get("LocalValidation", True): # determine if the message contains a valid transaction before # we send the message to the network # we need to start with a copy of the message due to cases # where side effects of the validity check may impact objects # related to msg mymsg = copy.deepcopy(msg) if hasattr(mymsg, 'Transaction') and mymsg.Transaction is not None: mytxn = mymsg.Transaction LOGGER.info('starting local validation ' 'for txn id: %s type: %s', mytxn.Identifier, mytxn.TransactionTypeName) pending_block_txns = None if self.journal.pending_block is not None: pending_block_txns = \ self.journal.pending_block.TransactionIDs try: temp_store_map = self._get_store_map() except KeyError as e: return self._encode_error_response( request, http.NOT_FOUND, e) pending_txns = copy.copy(self.journal.pending_transactions) pending_txn_ids = [x for x in pending_txns.iterkeys()] # clone a copy of the ledger's message queue so we can # temporarily play forward all locally submitted yet # uncommitted transactions my_queue = copy.deepcopy( self.validator.gossip.IncomingMessageQueue) transaction_type = mytxn.TransactionTypeName if transaction_type not in temp_store_map.TransactionStores: LOGGER.info('transaction type %s not in global store map', transaction_type) return self._encode_error_response( request, http.BAD_REQUEST, 'unable to validate enclosed' ' transaction {0}'.format(data)) if pending_block_txns is not None: pending_txn_ids = pending_block_txns + pending_txn_ids # apply any local pending transactions for txn_id in pending_txn_ids: if txn_id in self.journal.transaction_store: pend_txn = self.journal.transaction_store[txn_id] my_store = temp_store_map.get_transaction_store( pend_txn.TransactionTypeName) if pend_txn and pend_txn.is_valid(my_store): my_pend_txn = copy.copy(pend_txn) my_pend_txn.apply(my_store) # apply any enqueued messages while len(my_queue) > 0: qmsg = my_queue.pop() if qmsg and \ self.journal.gossip.dispatcher.\ has_message_handler(qmsg.MessageType): if (hasattr(qmsg, 'Transaction') and qmsg.Transaction is not None): my_store = temp_store_map.get_transaction_store( qmsg.Transaction.TransactionTypeName) if qmsg.Transaction.is_valid(my_store): myqtxn = copy.copy(qmsg.Transaction) myqtxn.apply(my_store) # determine validity of the POSTed transaction against our # new temporary state my_store = temp_store_map.get_transaction_store( mytxn.TransactionTypeName) try: LOGGER.info(mytxn) mytxn.check_valid(my_store) except InvalidTransactionError as e: LOGGER.info('submitted transaction fails transaction ' 'family validation check: %s; %s', request.path, mymsg.dump()) return self._encode_error_response( request, http.BAD_REQUEST, e) except: LOGGER.info('submitted transaction is ' 'not valid %s; %s; %s', request.path, mymsg.dump(), traceback.format_exc(20)) raise LOGGER.info('transaction %s is valid', msg.Transaction.Identifier) # and finally execute the associated method # and send back the results self.validator.gossip.broadcast_message(msg) return msg.dump()
def process_telegram_data(data):
    try:
        logger.debug(data)
    except Exception as e:
        logger.error('Exception:%s', e)
        logger.error(traceback.format_exc())
def data_mapping(dataframe, template_doc, protocol_doc, sar_doc, another_doc, filename):
    try:
        for each_record in dataframe.values[::-1]:
            template_doc_body = template_doc._element.body
            for i in range(len(template_doc_body)):
                if isinstance(template_doc_body[i], CT_P):
                    para = Paragraph(template_doc_body[i], CT_P)
                    if template_doc_body[i].style == None:
                        pass
                    else:
                        if 'Heading' in template_doc_body[i].style and para.text.strip() == re.sub("^\d+(?:\.\d*)*", '', each_record[1]).strip():
                            source_file = ''
                            if each_record[2] == 'Protocol':
                                source_file = protocol_doc
                                source_indices = get_indices_from_source(source_file, re.sub("^\d+(?:\.\d*)*", '', each_record[3]).strip())
                            elif each_record[2] == 'SAR':
                                source_file = sar_doc
                                source_indices = get_indices_from_source(source_file, each_record[3].strip())
                            else:
                                source_file = another_doc
                                source_indices = get_indices_from_source(source_file, re.sub("^\d+(?:\.\d*)*", '', each_record[3]).strip())
                            if source_indices[0] != 0 and source_indices[1] != 0:
                                copy_mapped_data(i, template_doc_body, source_file, source_indices, template_doc)
        timestr = time.strftime("%Y%m%d-%H%M%S")
        if filename:
            f_name = 'reports\\' + filename + '_' + timestr + '.docx'
        else:
            f_name = 'reports\\output_' + timestr + '.docx'
        file_path = os.path.join(settings.MEDIA_ROOT, f_name)
        template_doc.save(file_path)
        return f_name
    except Exception as e:
        csr_except_logger.critical(str(e) + '\n' + str(traceback.format_exc()))
        logger.info('renderizar itemlist')
        platformtools.render_items(itemlist, item)

    # ~ elif itemlist == None:  # If Kodi expects a listing (disabled because ERROR: GetDirectory shows up in the log anyway)
        # ~ logger.info('sin renderizar')
        # ~ platformtools.render_no_items()

    elif itemlist == True:
        logger.info('El canal ha ejecutado correctamente una acción que no devuelve ningún listado.')

    elif itemlist == False:
        logger.info('El canal ha ejecutado una acción que no devuelve ningún listado.')

except urllib2.URLError, e:
    import traceback
    logger.error(traceback.format_exc())

    # Grab inner and third party errors
    if hasattr(e, 'reason'):
        logger.error("Razon del error, codigo: %s | Razon: %s" % (str(e.reason[0]), str(e.reason[1])))
        texto = "No se puede conectar con el servidor"  # "No se puede conectar con el sitio web"
        platformtools.dialog_ok(config.__addon_name, texto)

    # Grab server response errors
    elif hasattr(e, 'code'):
        logger.error("Codigo de error HTTP : %d" % e.code)
        platformtools.dialog_ok(config.__addon_name, "El sitio web no funciona correctamente (error http %d)" % e.code)

except WebErrorException, e:
    import traceback
    logger.error(traceback.format_exc())
def reset(self, new_inflow_rate=None):
    """Reset the environment.

    This method is performed in between rollouts. It resets the state of
    the environment, and re-initializes the vehicles in their starting
    positions. If "shuffle" is set to True in InitialConfig, the initial
    positions of vehicles is recalculated and the vehicles are shuffled.

    Returns
    -------
    observation : dict of array_like
        the initial observation of the space. The initial reward is assumed
        to be zero.
    """
    # reset the time counter
    self.time_counter = 0

    # warn about not using restart_instance when using inflows
    if len(self.net_params.inflows.get()) > 0 and \
            not self.sim_params.restart_instance:
        print(
            "**********************************************************\n"
            "**********************************************************\n"
            "**********************************************************\n"
            "WARNING: Inflows will cause computational performance to\n"
            "significantly decrease after large number of rollouts. In \n"
            "order to avoid this, set SumoParams(restart_instance=True).\n"
            "**********************************************************\n"
            "**********************************************************\n"
            "**********************************************************")

    if self.sim_params.restart_instance or \
            (self.step_counter > 2e6 and self.simulator != 'aimsun'):
        self.step_counter = 0
        # issue a random seed to induce randomness into the next rollout
        self.sim_params.seed = random.randint(0, 1e5)

        self.k.vehicle = deepcopy(self.initial_vehicles)
        self.k.vehicle.master_kernel = self.k
        # restart the sumo instance
        self.restart_simulation(self.sim_params)

    # perform shuffling (if requested)
    elif self.initial_config.shuffle:
        self.setup_initial_state()

    # clear all vehicles from the network and the vehicles class
    if self.simulator == 'traci':
        for veh_id in self.k.kernel_api.vehicle.getIDList():  # FIXME: hack
            try:
                self.k.vehicle.remove(veh_id)
            except (FatalTraCIError, TraCIException):
                print(traceback.format_exc())

    # clear all vehicles from the network and the vehicles class
    # FIXME (ev, ak) this is weird and shouldn't be necessary
    for veh_id in list(self.k.vehicle.get_ids()):
        # do not try to remove the vehicles from the network in the first
        # step after initializing the network, as there will be no vehicles
        if self.step_counter == 0:
            continue
        try:
            self.k.vehicle.remove(veh_id)
        except (FatalTraCIError, TraCIException):
            print("Error during start: {}".format(traceback.format_exc()))

    # reintroduce the initial vehicles to the network
    for veh_id in self.initial_ids:
        type_id, edge, lane_index, pos, speed = \
            self.initial_state[veh_id]

        try:
            self.k.vehicle.add(
                veh_id=veh_id,
                type_id=type_id,
                edge=edge,
                lane=lane_index,
                pos=pos,
                speed=speed)
        except (FatalTraCIError, TraCIException):
            # if a vehicle was not removed in the first attempt, remove it
            # now and then reintroduce it
            self.k.vehicle.remove(veh_id)
            if self.simulator == 'traci':
                self.k.kernel_api.vehicle.remove(veh_id)  # FIXME: hack
            self.k.vehicle.add(
                veh_id=veh_id,
                type_id=type_id,
                edge=edge,
                lane=lane_index,
                pos=pos,
                speed=speed)

    # advance the simulation in the simulator by one step
    self.k.simulation.simulation_step()

    # update the information in each kernel to match the current state
    self.k.update(reset=True)

    # update the colors of vehicles
    if self.sim_params.render:
        self.k.vehicle.update_vehicle_colors()

    # check to make sure all vehicles have been spawned
    if len(self.initial_ids) > self.k.vehicle.num_vehicles:
        missing_vehicles = list(
            set(self.initial_ids) - set(self.k.vehicle.get_ids()))
        msg = '\nNot enough vehicles have spawned! Bad start?\n' \
              'Missing vehicles / initial state:\n'
        for veh_id in missing_vehicles:
            msg += '- {}: {}\n'.format(veh_id, self.initial_state[veh_id])
        raise FatalFlowError(msg=msg)

    # perform (optional) warm-up steps before training
    for _ in range(self.env_params.warmup_steps):
        observation, _, _, _ = self.step(rl_actions=[0, 0, 0])

    # render a frame
    self.render(reset=True)

    return self.get_state()
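# Hedged usage sketch, not from the source: how an environment exposing the reset()
# method above is typically driven in a rollout loop. The `env` argument, the
# `env_params.horizon` attribute, and passing rl_actions=None to let the simulation
# advance without a trained policy are assumptions for illustration only.
def run_rollout(env):
    state = env.reset()
    total_reward = 0.0
    for _ in range(env.env_params.horizon):
        # step() returns (observation, reward, done, info)
        state, reward, done, _ = env.step(rl_actions=None)
        total_reward += reward
        if done:
            break
    return total_reward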
            # or find a way to kill the access from memory
            if curfile in ('unrar.dll', 'unrar64.dll'):
                try:
                    os.chmod(new_path, stat.S_IWRITE)
                    os.remove(new_path)
                    os.renames(old_path, new_path)
                except Exception, e:
                    logger.log(u"Unable to update " + new_path + ': ' + ex(e), logger.DEBUG)
                    os.remove(old_path)  # Trash the updated file without moving in new path
                continue

            if os.path.isfile(new_path):
                os.remove(new_path)
            os.renames(old_path, new_path)

        # update version.txt with commit hash
        try:
            ver_file = open(version_path, 'w')
            ver_file.write(self._newest_commit_hash)
            ver_file.close()
        except (IOError, OSError), e:
            logger.log(u"Unable to write version file, update not complete: " + ex(e), logger.ERROR)
            return False

    except Exception, e:
        logger.log(u"Error while trying to update: " + ex(e), logger.ERROR)
        logger.log(u"Traceback: " + traceback.format_exc(), logger.DEBUG)
        return False

    return True
async def _create_offer_for_ids(self, offer: Dict[int, int]) -> Tuple[bool, Optional[TradeRecord], Optional[str]]:
    """
    Offer is dictionary of wallet ids and amount
    """
    spend_bundle = None
    try:
        for id in offer.keys():
            amount = offer[id]
            wallet_id = uint32(int(id))
            wallet = self.wallet_state_manager.wallets[wallet_id]
            if isinstance(wallet, CCWallet):
                balance = await wallet.get_confirmed_balance()
                if balance < abs(amount) and amount < 0:
                    raise Exception(f"insufficient funds in wallet {wallet_id}")
                if amount > 0:
                    if spend_bundle is None:
                        to_exclude: List[Coin] = []
                    else:
                        to_exclude = spend_bundle.removals()
                    zero_spend_bundle: SpendBundle = await wallet.generate_zero_val_coin(False, to_exclude)

                    if spend_bundle is None:
                        spend_bundle = zero_spend_bundle
                    else:
                        spend_bundle = SpendBundle.aggregate([spend_bundle, zero_spend_bundle])

                    additions = zero_spend_bundle.additions()
                    removals = zero_spend_bundle.removals()
                    zero_val_coin: Optional[Coin] = None
                    for add in additions:
                        if add not in removals and add.amount == 0:
                            zero_val_coin = add

                    new_spend_bundle = await wallet.create_spend_bundle_relative_amount(amount, zero_val_coin)
                else:
                    new_spend_bundle = await wallet.create_spend_bundle_relative_amount(amount)
            elif isinstance(wallet, Wallet):
                if spend_bundle is None:
                    to_exclude = []
                else:
                    to_exclude = spend_bundle.removals()
                new_spend_bundle = await wallet.create_spend_bundle_relative_chia(amount, to_exclude)
            else:
                return False, None, "unsupported wallet type"

            if new_spend_bundle is None or new_spend_bundle.removals() == []:
                raise Exception(f"Wallet {id} was unable to create offer.")
            if spend_bundle is None:
                spend_bundle = new_spend_bundle
            else:
                spend_bundle = SpendBundle.aggregate([spend_bundle, new_spend_bundle])

        if spend_bundle is None:
            return False, None, None

        now = uint64(int(time.time()))
        trade_offer: TradeRecord = TradeRecord(
            confirmed_at_index=uint32(0),
            accepted_at_time=None,
            created_at_time=now,
            my_offer=True,
            sent=uint32(0),
            spend_bundle=spend_bundle,
            tx_spend_bundle=None,
            additions=spend_bundle.additions(),
            removals=spend_bundle.removals(),
            trade_id=std_hash(spend_bundle.name() + bytes(now)),
            status=uint32(TradeStatus.PENDING_ACCEPT.value),
            sent_to=[],
        )
        return True, trade_offer, None
    except Exception as e:
        tb = traceback.format_exc()
        self.log.error(f"Error with creating trade offer: {type(e)}{tb}")
        return False, None, str(e)
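# Hedged usage sketch, not from the source: how a caller might invoke the method
# above. The `trade_manager` object and the wallet ids are assumptions for
# illustration; the offer dict maps wallet ids to signed amounts (negative = give,
# positive = receive).
async def make_example_offer(trade_manager):
    offer = {1: -10, 2: 2}  # give 10 units from wallet 1, request 2 units into wallet 2
    success, trade_record, error = await trade_manager._create_offer_for_ids(offer)
    if not success:
        raise Exception(f"offer failed: {error}")
    return trade_record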
def get_indices_from_source(source_file, copy_heading):
    try:
        body = source_file._element.body.xpath('w:p | w:tbl | //w:p/w:r/w:drawing/wp:inline')
        source_start_index = 0
        source_end_index = 0
        for i in range(len(body)):
            if isinstance(body[i], CT_P):
                para = Paragraph(body[i], CT_P)
                if body[i].style == None:
                    pass
                else:
                    if 'Heading' in body[i].style and para.text.strip() == copy_heading:
                        temp = body[i].style
                        try:
                            temp_num = list(map(int, re.findall(r'\d+', temp)))[0]
                        except:
                            # Some headings contain no number
                            temp_num = None
                        # if numbered headings
                        if temp_num != None:
                            sub_doc = body[i+1::]
                            for j in range(len(sub_doc)):
                                if isinstance(sub_doc[j], CT_P):
                                    sub_para = Paragraph(sub_doc[j], CT_P)
                                    sub_temp = sub_doc[j].style
                                    try:
                                        sub_temp_num = list(map(int, re.findall(r'\d+', sub_temp)))[0]
                                    except:
                                        sub_temp_num = 0
                                    if sub_temp_num != 0:
                                        if sub_doc[j].style == temp or temp_num > sub_temp_num:
                                            source_end_index = i+j
                                            break
                                    else:
                                        if len(sub_doc)-1 == j:
                                            source_end_index = i+j
                                            break
                            source_start_index = i
                            break
                        # if no numbered headings
                        else:
                            sub_doc = body[i+1::]
                            for j in range(len(sub_doc)):
                                if isinstance(sub_doc[j], CT_P):
                                    sub_para = Paragraph(sub_doc[j], CT_P)
                                    if sub_doc[j].style == None:
                                        pass
                                    else:
                                        if 'Heading' in sub_doc[j].style:
                                            source_end_index = i+j
                                            break
                            source_start_index = i
                            break
        return source_start_index, source_end_index
    except Exception as e:
        csr_except_logger.critical(str(e) + '\n' + str(traceback.format_exc()))
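# Hedged usage sketch, not from the source: locating the block of body elements
# under a given heading in a python-docx document. The file name and heading text
# are illustrative, and the function is assumed to be importable from the same
# module that defines it.
from docx import Document

source_doc = Document('source.docx')
start, end = get_indices_from_source(source_doc, 'Introduction')
print('copy body elements %d through %d' % (start, end))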
def run(self, cwd=None, ignore_exception=None, **inputs):
    """Execute this interface.

    This interface will not raise an exception if runtime.returncode is
    non-zero.

    Parameters
    ----------
    cwd : specify a folder where the interface should be run
    inputs : allows the interface settings to be updated

    Returns
    -------
    results : an InterfaceResult object containing a copy of the instance
        that was executed, provenance information and, if successful, results
    """
    from ...utils.profiler import ResourceMonitor

    # if ignore_exception is not provided, taking self.ignore_exception
    if ignore_exception is None:
        ignore_exception = self.ignore_exception

    # Tear-up: get current and prev directories
    syscwd = rgetcwd(error=False)  # Recover when wd does not exist
    if cwd is None:
        cwd = syscwd

    os.chdir(cwd)  # Change to the interface wd

    enable_rm = config.resource_monitor and self.resource_monitor
    self.inputs.trait_set(**inputs)
    self._check_mandatory_inputs()
    self._check_version_requirements(self.inputs)
    interface = self.__class__
    self._duecredit_cite()

    # initialize provenance tracking
    store_provenance = str2bool(
        config.get('execution', 'write_provenance', 'false'))

    env = deepcopy(dict(os.environ))
    if self._redirect_x:
        env['DISPLAY'] = config.get_display()

    runtime = Bunch(
        cwd=cwd,
        prevcwd=syscwd,
        returncode=None,
        duration=None,
        environ=env,
        startTime=dt.isoformat(dt.utcnow()),
        endTime=None,
        platform=platform.platform(),
        hostname=platform.node(),
        version=self.version)

    mon_sp = None
    if enable_rm:
        mon_freq = float(
            config.get('execution', 'resource_monitor_frequency', 1))
        proc_pid = os.getpid()
        iflogger.debug(
            'Creating a ResourceMonitor on a %s interface, PID=%d.',
            self.__class__.__name__, proc_pid)
        mon_sp = ResourceMonitor(proc_pid, freq=mon_freq)
        mon_sp.start()

    # Grab inputs now, as they should not change during execution
    inputs = self.inputs.get_traitsfree()
    outputs = None

    try:
        runtime = self._pre_run_hook(runtime)
        runtime = self._run_interface(runtime)
        runtime = self._post_run_hook(runtime)
        outputs = self.aggregate_outputs(runtime)
    except Exception as e:
        import traceback

        # Retrieve the maximum info fast
        runtime.traceback = traceback.format_exc()
        # Gather up the exception arguments and append nipype info.
        exc_args = e.args if getattr(e, 'args') else tuple()
        exc_args += (
            'An exception of type %s occurred while running interface %s.' %
            (type(e).__name__, self.__class__.__name__), )
        if config.get('logging', 'interface_level', 'info').lower() == 'debug':
            exc_args += ('Inputs: %s' % str(self.inputs), )

        runtime.traceback_args = ('\n'.join(
            ['%s' % arg for arg in exc_args]), )

        if not ignore_exception:
            raise
    finally:
        # This needs to be done always
        runtime.endTime = dt.isoformat(dt.utcnow())
        timediff = parseutc(runtime.endTime) - parseutc(runtime.startTime)
        runtime.duration = (timediff.days * 86400 + timediff.seconds +
                            timediff.microseconds / 1e6)

        results = InterfaceResult(
            interface, runtime, inputs=inputs, outputs=outputs,
            provenance=None)

        # Add provenance (if required)
        if store_provenance:
            # Provenance will only throw a warning if something went wrong
            results.provenance = write_provenance(results)

        # Make sure runtime profiler is shut down
        if enable_rm:
            import numpy as np
            mon_sp.stop()

            runtime.mem_peak_gb = None
            runtime.cpu_percent = None

            # Read .prof file in and set runtime values
            vals = np.loadtxt(mon_sp.fname, delimiter=',')
            if vals.size:
                vals = np.atleast_2d(vals)
                runtime.mem_peak_gb = vals[:, 1].max() / 1024
                runtime.cpu_percent = vals[:, 2].max()
                runtime.prof_dict = {
                    'time': vals[:, 0].tolist(),
                    'cpus': vals[:, 1].tolist(),
                    'rss_GiB': (vals[:, 2] / 1024).tolist(),
                    'vms_GiB': (vals[:, 3] / 1024).tolist(),
                }

        os.chdir(syscwd)

    return results
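# Hedged usage sketch, not from the source: running a Nipype interface and reading
# the runtime record that run() populates. The BET interface and the input file
# name are illustrative assumptions.
from nipype.interfaces.fsl import BET

skullstrip = BET(in_file='structural.nii', mask=True)
results = skullstrip.run()
print(results.runtime.returncode, results.runtime.duration)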
# Copyright (c) IBM Corporation 2020
# Apache License, Version 2.0 (see https://opensource.org/licenses/Apache-2.0)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import json
import traceback
try:
    import requests
except Exception:
    requests = None
    LIB_IMP_ERR = traceback.format_exc()


def get_auth_argument_spec():
    """
    Return the arguments of ansible module used for authentication with z/OSMF server.
    :rtype: dict[str, dict]
    """
    return dict(
        zmf_host=dict(required=True, type='str'),
        zmf_port=dict(required=False, type='int'),
        zmf_user=dict(required=False, type='str', no_log=True),
        zmf_password=dict(required=False, type='str', no_log=True),
        zmf_crt=dict(required=False, type='str', no_log=True),
        zmf_key=dict(required=False, type='str', no_log=True))


def get_connect_argument_spec():
    """
    Return the arguments of ansible module used for session setup.
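# Hedged sketch, not part of the source: how a z/OSMF module might fold the shared
# auth arguments into its own argument spec and fail cleanly when `requests` is
# missing. The `workflow_name` option is an illustrative assumption;
# missing_required_lib is the standard AnsibleModule helper.
from ansible.module_utils.basic import AnsibleModule, missing_required_lib

def example_module():
    argument_spec = dict(workflow_name=dict(required=True, type='str'))
    argument_spec.update(get_auth_argument_spec())
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
    if requests is None:
        module.fail_json(msg=missing_required_lib('requests'), exception=LIB_IMP_ERR)
    return module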
def main():
    argument_spec = ovirt_full_argument_spec(
        state=dict(
            choices=['present', 'absent', 'attached', 'detached', 'exported', 'imported'],
            default='present'
        ),
        id=dict(default=None),
        name=dict(default=None, aliases=['alias']),
        description=dict(default=None),
        vm_name=dict(default=None),
        vm_id=dict(default=None),
        size=dict(default=None),
        interface=dict(default=None, choices=['virtio', 'ide', 'virtio_scsi']),
        storage_domain=dict(default=None),
        storage_domains=dict(default=None, type='list', elements='str'),
        profile=dict(default=None),
        quota_id=dict(default=None),
        format=dict(default='cow', choices=['raw', 'cow']),
        content_type=dict(
            default='data',
            choices=['data', 'iso', 'hosted_engine', 'hosted_engine_sanlock', 'hosted_engine_metadata', 'hosted_engine_configuration']
        ),
        backup=dict(default=None, type='str', choices=['incremental']),
        sparse=dict(default=None, type='bool'),
        bootable=dict(default=None, type='bool'),
        shareable=dict(default=None, type='bool'),
        logical_unit=dict(default=None, type='dict'),
        download_image_path=dict(default=None),
        upload_image_path=dict(default=None, aliases=['image_path']),
        force=dict(default=False, type='bool'),
        sparsify=dict(default=None, type='bool'),
        openstack_volume_type=dict(default=None),
        image_provider=dict(default=None),
        host=dict(default=None),
        wipe_after_delete=dict(type='bool', default=None),
        activate=dict(default=None, type='bool'),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    lun = module.params.get('logical_unit')
    host = module.params['host']
    # Fail when host is specified with the LUN id. The LUN id is needed to identify
    # an existing disk if it is already available in the environment.
    if (host and lun is None) or (host and lun.get("id") is None):
        module.fail_json(
            msg="Can not use parameter host ({0!s}) without "
                "specifying the logical_unit id".format(host)
        )

    check_sdk(module)
    check_params(module)

    try:
        disk = None
        state = module.params['state']
        auth = module.params.get('auth')
        connection = create_connection(auth)
        disks_service = connection.system_service().disks_service()
        disks_module = DisksModule(
            connection=connection,
            module=module,
            service=disks_service,
        )

        force_create = False
        vm_service = get_vm_service(connection, module)
        if lun:
            disk = _search_by_lun(disks_service, lun.get('id'))
        else:
            disk = disks_module.search_entity(search_params=searchable_attributes(module))
            if vm_service and disk:
                # If the disk is not among the VM's disk attachments but was still found,
                # it was found for a template with the same name as the VM, so we should
                # force create the VM disk.
                force_create = disk.id not in [a.disk.id for a in vm_service.disk_attachments_service().list() if a.disk]

        ret = None
        # First take care of creating the VM, if needed:
        if state in ('present', 'detached', 'attached'):
            # Always activate disk when it's being created
            if vm_service is not None and disk is None:
                module.params['activate'] = module.params['activate'] is None or module.params['activate']
            ret = disks_module.create(
                entity=disk if not force_create else None,
                result_state=otypes.DiskStatus.OK if lun is None else None,
                fail_condition=lambda d: d.status == otypes.DiskStatus.ILLEGAL if lun is None else False,
                force_create=force_create,
                _wait=True if module.params['upload_image_path'] else module.params['wait'],
            )
            is_new_disk = ret['changed']
            ret['changed'] = ret['changed'] or disks_module.update_storage_domains(ret['id'])
            # We need to pass ID to the module, so in case we want detach/attach disk
            # we have this ID specified to attach/detach method:
            module.params['id'] = ret['id']

            # Upload disk image in case it's a new disk or the force parameter is passed:
            if module.params['upload_image_path'] and (is_new_disk or module.params['force']):
                if module.params['format'] == 'cow' and module.params['content_type'] == 'iso':
                    module.warn("To upload an ISO image 'format' parameter needs to be set to 'raw'.")
                uploaded = upload_disk_image(connection, module)
                ret['changed'] = ret['changed'] or uploaded

            # Download disk image in case the file doesn't exist or the force parameter is passed:
            if (
                module.params['download_image_path']
                and (not os.path.isfile(module.params['download_image_path']) or module.params['force'])
            ):
                downloaded = download_disk_image(connection, module)
                ret['changed'] = ret['changed'] or downloaded

            # Disk sparsify, only if disk is of image type:
            if not module.check_mode:
                disk = disks_service.disk_service(module.params['id']).get()
                if disk.storage_type == otypes.DiskStorageType.IMAGE:
                    ret = disks_module.action(
                        action='sparsify',
                        action_condition=lambda d: module.params['sparsify'],
                        wait_condition=lambda d: d.status == otypes.DiskStatus.OK,
                    )

        # Export disk as image to glance domain
        elif state == 'exported':
            disk = disks_module.search_entity()
            if disk is None:
                module.fail_json(
                    msg="Can not export given disk '%s', it doesn't exist" %
                        (module.params.get('name') or module.params.get('id'))
                )
            if disk.storage_type == otypes.DiskStorageType.IMAGE:
                ret = disks_module.action(
                    action='export',
                    action_condition=lambda d: module.params['image_provider'],
                    wait_condition=lambda d: d.status == otypes.DiskStatus.OK,
                    storage_domain=otypes.StorageDomain(name=module.params['image_provider']),
                )
        elif state == 'imported':
            glance_service = connection.system_service().openstack_image_providers_service()
            image_provider = search_by_name(glance_service, module.params['image_provider'])
            images_service = glance_service.service(image_provider.id).images_service()
            entity_id = get_id_by_name(images_service, module.params['name'])
            images_service.service(entity_id).import_(
                storage_domain=otypes.StorageDomain(
                    name=module.params['storage_domain']
                ) if module.params['storage_domain'] else None,
                disk=otypes.Disk(
                    name=module.params['name']
                ),
                import_as_template=False,
            )
            # Wait for disk to appear in system:
            disk = disks_module.wait_for_import(
                condition=lambda t: t.status == otypes.DiskStatus.OK
            )
            ret = disks_module.create(result_state=otypes.DiskStatus.OK)
        elif state == 'absent':
            ret = disks_module.remove()

        # If VM was passed attach/detach disks to/from the VM:
        if vm_service:
            disk_attachments_service = vm_service.disk_attachments_service()
            disk_attachments_module = DiskAttachmentsModule(
                connection=connection,
                module=module,
                service=disk_attachments_service,
                changed=ret['changed'] if ret else False,
            )

            if state == 'present' or state == 'attached':
                ret = disk_attachments_module.create()
                if lun is None:
                    wait(
                        service=disk_attachments_service.service(ret['id']),
                        condition=lambda d: follow_link(connection, d.disk).status == otypes.DiskStatus.OK,
                        wait=module.params['wait'],
                        timeout=module.params['timeout'],
                    )
            elif state == 'detached':
                ret = disk_attachments_module.remove()

        # When the host parameter is specified and the disk is not being
        # removed, refresh the information about the LUN.
        if state != 'absent' and host:
            hosts_service = connection.system_service().hosts_service()
            host_id = get_id_by_name(hosts_service, host)
            disks_service.disk_service(disk.id).refresh_lun(otypes.Host(id=host_id))

        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        connection.close(logout=auth.get('token') is None)
# from __future__ import unicode_literals

import os
import sys
import traceback
# from random import shuffle

srcdir = os.path.abspath(os.path.dirname(__file__))
libdir = os.path.join(srcdir, 'pylib')
sys.path.append(libdir)
try:
    # pylint: disable=wrong-import-position
    from harisekhon.utils import log_option, uniq_list_ordered
    from harisekhon.utils import validate_int
    from find_active_server import FindActiveServer
except ImportError as _:
    print(traceback.format_exc(), end='')
    sys.exit(4)

__author__ = 'Hari Sekhon'
__version__ = '0.6.2'


class FindActiveHadoopYarnResourceManager(FindActiveServer):

    def __init__(self):
        # Python 2.x
        super(FindActiveHadoopYarnResourceManager, self).__init__()
        # Python 3.x
        # super().__init__()
        self.default_port = 8088
        self.port = self.default_port
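# Hedged sketch, not in the source excerpt: CLI classes built on FindActiveServer are
# normally executed by instantiating the class and calling main(); this entry point
# is assumed here for completeness.
if __name__ == '__main__':
    FindActiveHadoopYarnResourceManager().main()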
def handle_youtube_dl_error(error):
    logging.error(traceback.format_exc())
    result = jsonify({'error': str(error)})
    result.status_code = 500
    return result
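# Hedged sketch (assumption, not from the source): wiring the handler above to a
# Flask app. The app name is illustrative; DownloadError is the exception youtube-dl
# raises on failed downloads, but any exception class could be registered this way.
from flask import Flask
from youtube_dl.utils import DownloadError

app = Flask(__name__)
app.register_error_handler(DownloadError, handle_youtube_dl_error)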
        if set(needle) <= set(line):  # if A is a subset of B
            if section_idx > 0:
                data_per_section[section_idx - 1].append(old_data[start:idx - 1])
                start = idx + 1
            data_per_section.append([line])
            if section_idx == len(section_needles) - 1:
                data_per_section[section_idx] = [line, old_data[start:]]
                break
            section_idx += 1

    data_per_section[0][1] = update_py_packages(data_per_section[0][1], path_sources_dir, args.verbose)
    data_per_section[3][1] = update_js_dependencies(data_per_section[3][1], args.verbose)

    write_to_csv(data_per_section, licenses_csv)

    if not args.path:
        os.unlink(path_sources_pkg)
        rmtree(path_sources_dir)


if __name__ == "__main__":
    try:
        args = parse_arguments()
        main(args)
        sys.exit(0)
    except Exception:
        sys.stderr.write(traceback.format_exc())
        sys.exit(1)
def log(msg, level=xbmc.LOGDEBUG):
    if DEBUG == False and level != xbmc.LOGERROR:
        return
    if level == xbmc.LOGERROR:
        msg = '%s, %s' % (uni(msg), traceback.format_exc())
    xbmc.log('%s-%s-%s' % (ADDON_ID, ADDON_VERSION, uni(msg)), level)
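# Hedged usage sketch, not from the source: routine messages are dropped unless
# DEBUG is enabled, while errors always carry the current traceback. The failing
# call and its name are illustrative assumptions within the same addon module.
try:
    risky_addon_operation()
except Exception:
    log('risky_addon_operation failed', xbmc.LOGERROR)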
def handle_wrong_parameter(error):
    logging.error(traceback.format_exc())
    result = jsonify({'error': str(error)})
    result.status_code = 400
    return result