def test_system_students_have_a_verified_email_14763(self):
    """Student has a verified email.

    Steps:
    Go to https://tutor-qa.openstax.org/
    Click on the 'Login' button
    Enter the student account in the username and password text boxes
    Click on the 'Sign in' button
    Click "My Account" from the user menu

    Expected Result:
    There is no "Click to verify" link next to the email
    """
    self.ps.test_updates['name'] = 't2.09.005' \
        + inspect.currentframe().f_code.co_name[4:]
    self.ps.test_updates['tags'] = [
        't2', 't2.09', 't2.09.005', '14763'
    ]
    self.ps.test_updates['passed'] = False

    # Test steps and verification assertions
    raise NotImplementedError(inspect.currentframe().f_code.co_name)

    self.ps.test_updates['passed'] = True
def get_final_jar_files(jar_files):
    name_trunk = os.path.basename(os.getcwd())
    dir_STOR = "STOR_%s" % name_trunk

    #debug
    print "[DEBUG:%d]" % inspect.currentframe().f_lineno  #sys.exit()
    print "dir_STOR=", dir_STOR
    print "jar_files=", jar_files
    print "os.listdir(dir_STOR)=", os.listdir(dir_STOR)

    final_jar_files = list()
    each_files = list()
    for name in jar_files:
        #debug
        print "[DEBUG:%d]" % inspect.currentframe().f_lineno  #sys.exit()
        print "name=%s" % name
        print "items=", [item for item in os.listdir(dir_STOR)
                         if name.split(".")[0] in item
                         and name.split(".")[1] in item]
        # os.path.isfile() needs the full path: a bare listdir() entry is
        # resolved against the current working directory, not dir_STOR
        each_files.append([item for item in os.listdir(dir_STOR)
                           if name.split(".")[0] in item
                           and os.path.isfile(os.path.join(dir_STOR, item))])

    return each_files
def log(msg):
    global logfile
    if logfile is None:
        scriptname = script_name()
        logfile = open(BILD_LOG_DIR + "/" + scriptname + ".log", "w")  # assume this closes upon Python exit
    if msg is None:
        return
    msg = msg.strip()
    if len(msg) == 0:
        return
    caller = inspect.currentframe().f_back.f_code.co_name
    line = inspect.currentframe().f_back.f_lineno
    filename = os.path.basename(inspect.currentframe().f_back.f_code.co_filename)
    prefix = time.strftime('%x %X')
    # the grandparent frame may not exist for a top-level caller
    if inspect.currentframe().f_back.f_back is not None:
        filename2 = inspect.currentframe().f_back.f_back.f_code.co_filename
        line2 = inspect.currentframe().f_back.f_back.f_lineno
    if caller == "require":
        # require() logs a short, prefix-only line; return so the full
        # message below is not written a second time
        logfile.write("[%s] %s\n" % (prefix, msg))
        return
    if caller == "exec_and_log":
        caller = inspect.currentframe().f_back.f_back.f_code.co_name
        line = inspect.currentframe().f_back.f_back.f_lineno
        filename2 = inspect.currentframe().f_back.f_back.f_back.f_code.co_filename
        line2 = inspect.currentframe().f_back.f_back.f_back.f_lineno
    fullmsg = "[%s %s %s:%d %s:%d] %s\n" % (
        prefix, caller, filename2, line2, filename, line, msg)
    logfile.write(fullmsg)
    if debug:
        print fullmsg,
def test_user_verify_email_using_the_new_pin_approach_14764(self):
    """Verify email using the new PIN approach.

    Steps:
    Go to https://tutor-qa.openstax.org/
    Click on the 'Login' button
    Enter the teacher user account in the username and password text boxes
    Click on the 'Sign in' button
    If the user has more than one course, click on a Tutor course name
    Click "Verify now" on the orange header that pops up
    Enter a 6 digit verification code into the text box from the email
    Click "Go"

    Expected Result:
    A message confirming email verification pops up
    """
    self.ps.test_updates['name'] = 't2.09.006' \
        + inspect.currentframe().f_code.co_name[4:]
    self.ps.test_updates['tags'] = [
        't2', 't2.09', 't2.09.006', '14764'
    ]
    self.ps.test_updates['passed'] = False

    # Test steps and verification assertions
    raise NotImplementedError(inspect.currentframe().f_code.co_name)

    self.ps.test_updates['passed'] = True
def deny_empty_or_whitespace(string='', variable_name=''):
    """
    Prevent ``string`` or ``variable_name`` from being empty or only containing whitespace.

    :raises ValueError: Raises a ValueError if the string or the variable_name is empty or only
        contains whitespace. The ValueError contains the name of the calling function and the
        variable name used in the calling function.
    :type string: str
    :type variable_name: str
    :param string: The string to test.
    :param variable_name: The name of the variable used in the calling function.
    :return: None
    """
    if not variable_name:               # '' and None cases
        calling_function = currentframe().f_back.f_code.co_name
        raise ValueError(calling_function + ': variable_name input cannot be empty or None.')
    if not variable_name.strip():       # whitespace case
        calling_function = currentframe().f_back.f_code.co_name
        raise ValueError(calling_function + ': variable_name input cannot only contain whitespace.')

    # Concatenate the message pieces: passing them as separate arguments
    # would make ValueError carry a tuple instead of a readable string.
    if not string:                      # '' and None cases
        calling_function = currentframe().f_back.f_code.co_name
        raise ValueError(calling_function + ': ' + variable_name + ' cannot be empty or None.')
    if not string.strip():              # whitespace case
        calling_function = currentframe().f_back.f_code.co_name
        raise ValueError(calling_function + ': ' + variable_name + ' cannot only contain whitespace.')
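A minimal usage sketch of the validator above (the wrapper function set_title is hypothetical): the caller passes both the value and its own name for that value, and the raised message points back at the calling function via currentframe().f_back.

from inspect import currentframe

def set_title(title):
    # validate before use; on bad input this raises, e.g.,
    # "set_title: title cannot only contain whitespace."
    deny_empty_or_whitespace(title, 'title')
    return title.strip()

set_title('CSS rules!')   # OK
set_title('   ')          # ValueError names set_title and the variable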
def __init__(self, path, *args, **kwargs):
    """WARNING: Static() must be reentrant, as File re-instantiates itself
    via the createSimilarFile() method. Consequently, the content_type
    argument MUST be passed in kwargs.
    """
    basedir = os.path.dirname(inspect.getframeinfo(inspect.currentframe().f_back)[0])
    name = None
    if 'name' in kwargs:
        name = kwargs['name']
        del kwargs['name']
    if not os.path.isabs(path):
        path = os.path.join(basedir, path)
    static.File.__init__(self, path, *args, **kwargs)

    if name is not None:
        # TODO: use a dedicated module to create an anonymous object
        class anonymous(object):
            pass
        clb = anonymous()
        clb.__callable__ = {'url': '/' + name}
        # get module instance
        app = inspect.getmodule(inspect.currentframe().f_back)
        # clb's module is the current one (mother.template);
        # we want it to be app, to get correct url() resolution
        clb.__module__ = app.__name__
        setattr(app, name, clb)

    self.content_type = kwargs.get('content_type', '*/*')
    # set default encoding to utf8
    self.contentTypes['.css'] = 'text/css; charset=utf-8'
    self.contentTypes['.html'] = 'text/html; charset=utf-8'
def skill_edit(request):
    u"""Admin screen for registering and editing skills."""
    # skill = Skill.all().filter('key =', request.form['key']).fetch(1)
    logging.debug(inspect.currentframe().f_lineno)
    logging.debug(request.form)
    form = SkillForm()
    if request.method == "POST":
        logging.debug(inspect.currentframe().f_lineno)
        logging.debug(request.form)
        if request.form['name']:
            logging.debug(inspect.currentframe().f_lineno)
            logging.debug(request.form)
            skill = Skill.get_skill(request.form['name'])
            if not skill:
                skill = Skill.add_skill(
                    request.form['name'],
                    int(request.form['timing']),
                    int(request.form['typ']),
                    request.form['param'],
                    int(request.form['value']))
            logging.debug(inspect.currentframe().f_lineno)
            logging.debug(request.form['timing'])
            skill.timing = int(request.form['timing'])
            skill.typ = int(request.form['typ'])
            skill.param = request.form['param']
            skill.value = int(request.form['value'])
            # skill.job = request.form['job']
            skill.put()
            return redirect(url_for('htbrpg2kay/index'))
    return render_to_response('htbrpg2kay/skill_edit.html',
                              {'form': form.as_widget()})
def set_util_dir(dir_root):
    # dir_root = os.getcwd()
    root_name = os.path.basename(dir_root)

    stor_dir_name = "STOR_%s" % root_name
    log_dir_name = "log_%s" % root_name
    tmp_dir_name = "tmp"

    dir_STOR = os.path.join(dir_root, stor_dir_name)
    dir_log = os.path.join(dir_root, log_dir_name)
    dir_tmp = os.path.join(dir_root, tmp_dir_name)

    if not os.path.isdir(dir_STOR):
        os.mkdir(dir_STOR)
        print "\n[DEBUG:%d]" % inspect.currentframe().f_lineno,
        print "Dir created: %s" % stor_dir_name
    else:
        print "Dir exists: %s" % stor_dir_name

    if not os.path.isdir(dir_log):
        os.mkdir(dir_log)
        print "\n[DEBUG:%d]" % inspect.currentframe().f_lineno,
        print "Dir created: %s" % log_dir_name
    else:
        print "Dir exists: %s" % log_dir_name

    if not os.path.isdir(dir_tmp):
        os.mkdir(dir_tmp)
        print "\n[DEBUG:%d]" % inspect.currentframe().f_lineno,
        print "Dir created: %s" % tmp_dir_name
    else:
        print "Dir exists: %s" % tmp_dir_name
def handle_args(optDict, optListA, optListB, optListC, dir_root):
    """
    <Options>
    -V <directory1[,directory2]>
        Back up all the files in the directory.
        Example: webdev_util.py -V control,view
    """
    # vars
    target_dirs = list()

    # handle options
    for key in optDict.keys():
        if key == "-V":
            target_dirs = optDict[key].split(",")
            flag = 0
            for target in target_dirs:
                # target_dir = os.path.join(dir_root, optDict[key])
                target_dir = os.path.join(dir_root, target)
                if not os.path.isdir(target_dir):
                    print "\n[DEBUG:%d]" % inspect.currentframe().f_lineno
                    print "Directory doesn't exist: %s" \
                        % target_dir
                    flag = 1
                    # sys.exit()
            if flag == 1:
                sys.exit(0)
            else:
                print "\n[DEBUG:%d]" % inspect.currentframe().f_lineno
                print "Start processing versioning on the directories: %s" \
                    % target_dirs
    return target_dirs
def testIfFileExists(self, filename):
    """If the file exists in any of the possible folders, return the
    complete file path; otherwise, return None."""
    if os.path.isfile(filename):
        return filename
    elif os.path.isfile(os.path.join(os.getcwd(), filename)):
        # return the joined path, not the boolean from os.path.isfile()
        cfile = os.path.join(os.getcwd(), filename)
        return cfile
    elif os.path.isfile(os.path.join(os.path.dirname(sys.argv[0]), filename)):
        cfile = os.path.join(os.path.dirname(sys.argv[0]), filename)
        return cfile
    elif os.path.isfile(os.path.join(
            os.path.dirname(inspect.getfile(inspect.currentframe())), filename)):
        cfile = os.path.join(
            os.path.dirname(inspect.getfile(inspect.currentframe())), filename)
        return cfile
    else:
        return None
def testIfFileExistsInFolders(self, filename, folderList):
    """If the file exists in any of the possible folders or in the provided
    folderList, return the complete file path; otherwise, return None."""
    if os.path.isfile(filename):
        return filename
    elif os.path.isfile(os.path.join(os.getcwd(), filename)):
        # return the joined path, not the boolean from os.path.isfile()
        return os.path.join(os.getcwd(), filename)
    elif os.path.isfile(os.path.join(os.path.dirname(sys.argv[0]), filename)):
        return os.path.join(os.path.dirname(sys.argv[0]), filename)
    elif os.path.isfile(os.path.join(
            os.path.dirname(inspect.getfile(inspect.currentframe())), filename)):
        return os.path.join(
            os.path.dirname(inspect.getfile(inspect.currentframe())), filename)
    else:
        for i in folderList:
            if os.path.isfile(os.path.join(i, filename)):
                return os.path.join(i, filename)
        return None
def test_student_submit_a_question_to_customer_support_58347(self):
    """Submit a question to Customer Support.

    Steps:
    Click "Get Help" from the user menu in the upper right corner of the screen
    Enter a question or search words into the search engine
    Click "Search" or press enter
    Click on a search result
    Scroll to the bottom of the page
    Click "Contact Us"
    Fill out the required fields
    Enter "Submit"

    Expected Result:
    The message "Thank you for your message! We'll be back to you within one
    business day" is displayed
    """
    self.ps.test_updates["name"] = "t2.18.028" + \
        inspect.currentframe().f_code.co_name[4:]
    self.ps.test_updates["tags"] = ["t2", "t2.18", "t2.18.028", "58347"]
    self.ps.test_updates["passed"] = False

    # Test steps and verification assertions
    raise NotImplementedError(inspect.currentframe().f_code.co_name)

    self.ps.test_updates["passed"] = True
def test_user_inapp_notification_of_downtime_14752(self):
    """In-app notification of downtime.

    Steps:
    Go to Tutor
    Log in as admin
    Click "Admin" from the user menu
    Click "System Setting"
    Click "Notifications"
    Enter a new notification into the text box
    Click "Add"
    Log out of admin
    Log in as teacher01

    Expected Result:
    An orange header with the notification pops up when you sign in
    """
    self.ps.test_updates["name"] = "t2.18.001" + \
        inspect.currentframe().f_code.co_name[4:]
    self.ps.test_updates["tags"] = ["t2", "t2.18", "t2.18.001", "14752"]
    self.ps.test_updates["passed"] = False

    # Test steps and verification assertions
    raise NotImplementedError(inspect.currentframe().f_code.co_name)

    self.ps.test_updates["passed"] = True
def test_student_close_window_after_submitting_feedback_for_58343(self):
    """Close window after submitting feedback for an article.

    Steps:
    Click "Get Help" from the user menu in the upper right corner of the screen
    Enter a question or search words into the search engine
    Click "Search" or press enter
    Click on a search result
    Scroll to "Feedback"
    Click "No"
    Enter feedback into the box that pops up
    Click "Submit"
    Click "Close window"

    Expected Result:
    The popup box closes and the message "Thanks for your feedback" displays
    beneath "Feedback"
    """
    self.ps.test_updates["name"] = "t2.18.025" + \
        inspect.currentframe().f_code.co_name[4:]
    self.ps.test_updates["tags"] = ["t2", "t2.18", "t2.18.025", "58343"]
    self.ps.test_updates["passed"] = False

    # Test steps and verification assertions
    raise NotImplementedError(inspect.currentframe().f_code.co_name)

    self.ps.test_updates["passed"] = True
def test_student_cancel_feedback_58344(self):
    """Cancel feedback.

    Steps:
    Click "Get Help" from the user menu in the upper right corner of the screen
    Enter a question or search words into the search engine
    Click "Search" or press enter
    Click on a search result
    Scroll to "Feedback"
    Click "No"
    [optional] Enter feedback into text box
    Click "Cancel"

    Expected Result:
    The popup box closes
    """
    self.ps.test_updates["name"] = "t2.18.026" + \
        inspect.currentframe().f_code.co_name[4:]
    self.ps.test_updates["tags"] = ["t2", "t2.18", "t2.18.026", "58344"]
    self.ps.test_updates["passed"] = False

    # Test steps and verification assertions
    raise NotImplementedError(inspect.currentframe().f_code.co_name)

    self.ps.test_updates["passed"] = True
def add(self, name, paramtype, level, description, default=None,
        min=None, max=None, edit_method=""):
    newparam = {
        'name': name,
        'type': paramtype,
        'default': default,
        'level': level,
        'description': description,
        'min': min,
        'max': max,
        'srcline': inspect.currentframe().f_back.f_lineno,
        'srcfile': inspect.getsourcefile(inspect.currentframe().f_back.f_code),
        'edit_method': edit_method,
    }
    # compare the parameter's type (not the builtin `type`) to str_t
    if paramtype == str_t and (max != None or min != None):
        raise Exception("Max or min specified for %s, which is of string type" % name)
    pattern = r'^[a-zA-Z][a-zA-Z0-9_]*$'
    if not re.match(pattern, name):
        raise Exception("The name of field '%s' is definitely not a valid "
                        "C++ variable name! Change it accordingly." % name)
    self.gen.fill_type(newparam)
    self.gen.check_type_fill_default(newparam, 'default', self.gen.defval[paramtype])
    self.gen.check_type_fill_default(newparam, 'max', self.gen.maxval[paramtype])
    self.gen.check_type_fill_default(newparam, 'min', self.gen.minval[paramtype])
    self.parameters.append(newparam)
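The srcline/srcfile bookkeeping above records where the *caller* declared the parameter, which is the pattern used by ROS dynamic_reconfigure's ParameterGenerator. Assuming that API (an assumption; the snippet does not name its package), declaring parameters looks roughly like this:

# Sketch assuming the ROS dynamic_reconfigure ParameterGenerator API
from dynamic_reconfigure.parameter_generator_catkin import (
    ParameterGenerator, int_t, str_t)

gen = ParameterGenerator()
# srcline/srcfile will point at *these* call sites via currentframe().f_back
gen.add("int_param", int_t, 0, "An integer parameter",
        default=50, min=0, max=100)
gen.add("str_param", str_t, 0, "A string parameter",
        default="hello")   # min/max are rejected for string parameters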
def test_student_submit_feedback_for_an_article_58342(self):
    """Submit feedback for an article.

    Steps:
    Click "Get Help" from the user menu in the upper right corner of the screen
    Enter a question or search words into the search engine
    Click "Search" or press enter
    Click on a search result
    Scroll to "Feedback"
    Click "No"
    Enter feedback into the box that pops up
    Click "Submit"

    Expected Result:
    A message that says "Thanks for your feedback!" is displayed in the box
    """
    self.ps.test_updates["name"] = "t2.18.024" + \
        inspect.currentframe().f_code.co_name[4:]
    self.ps.test_updates["tags"] = ["t2", "t2.18", "t2.18.024", "58342"]
    self.ps.test_updates["passed"] = False

    # Test steps and verification assertions
    raise NotImplementedError(inspect.currentframe().f_code.co_name)

    self.ps.test_updates["passed"] = True
def assertSamePy(expected, actual):
    mensage = ""
    expectedType = str(type(expected))
    actualType = str(type(actual))
    status = False
    lineNumber = None
    if not type(expected) == type(actual):
        mensage += "The types of the given values are not the same"
        lineNumber = inspect.currentframe().f_back.f_lineno
        status = False
    else:
        if expected is actual:
            mensage += ("The expected value <" + str(expected) +
                        "> and the actual value <" + str(actual) +
                        "> refer to the same object")
            lineNumber = inspect.currentframe().f_back.f_lineno
            status = True
        else:
            mensage += ("The expected value <" + str(expected) +
                        "> does not refer to the same object as the actual value <" +
                        str(actual) + ">")
            lineNumber = inspect.currentframe().f_back.f_lineno
            status = False
    PyUniti.failsList[PyUniti.failsListPosition].addTest(
        PyUniti.UnitiTests.AssertSamePy(expected, actual, expectedType,
                                        actualType, lineNumber, status,
                                        mensage))
def is_empty(param_list):
    """
    :param param_list: parameter values to check for emptiness
    :return: ReturnValue describing whether any value is empty
    """
    routine_name = '##' + os.path.basename(__file__) + ':is_empty: '
    print routine_name
    ReturnValue.call_stack.append(routine_name)

    msg = "[%s]: non-empty" % inspect.currentframe().f_back.f_lineno
    if not len(param_list):
        msg = "[%s]: Empty value provided for <param_list>." % \
            inspect.currentframe().f_back.f_lineno
        return ReturnValue(call_status=call_err, msg=msg)
    elif len(param_list) == 1:
        # check the single element, not the list itself
        # (a one-element list is always truthy)
        if param_list[0]:
            msg = "[%s]: [%s] non-empty." % (
                inspect.currentframe().f_back.f_lineno, param_list)
            return ReturnValue(call_status=call_pass, msg=msg)
        else:
            msg = "[%s]: [%s] Empty." % (
                inspect.currentframe().f_back.f_lineno, param_list)
            return ReturnValue(call_status=call_fail, msg=msg)
    else:
        for param in param_list:
            if not param:
                # parenthesize the format arguments: without the parentheses
                # this built a tuple instead of a formatted string
                msg = "[%s]: Empty value provided for parameter [%s]" % (
                    inspect.currentframe().f_back.f_lineno, param)
                return ReturnValue(call_status=call_fail, msg=msg)
        # every parameter was non-empty
        return ReturnValue(call_status=call_pass, msg=msg)
def __init__(self, *instructions):
    self.instructions = instructions
    self.filename = getsourcefile(currentframe().f_back)
    self.lineno = currentframe().f_back.f_lineno
    for child in self.instructions:
        child.parent = self
        _test_instruction(child, self)
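The currentframe().f_back pattern above records where the *caller* constructed the object, not where __init__ itself lives. The same idea in a standalone sketch:

from inspect import currentframe, getsourcefile

class Node(object):
    def __init__(self):
        # one frame up (__init__'s caller) is the construction site
        caller = currentframe().f_back
        self.filename = getsourcefile(caller)
        self.lineno = caller.f_lineno

n = Node()
print("constructed at %s:%d" % (n.filename, n.lineno))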
def test_get_frame_var(self):
    value = pyxdebug.get_frame_var(inspect.currentframe(), 'self')
    assert value == self

    local_var = 123
    value = pyxdebug.get_frame_var(inspect.currentframe(), 'local_var')
    assert value == local_var
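The test implies get_frame_var resolves a name inside a given frame. A plausible minimal implementation — an assumption, not pyxdebug's actual code — reads the frame's locals and falls back to its globals:

def get_frame_var(frame, name):
    # inside a method frame, f_locals also contains 'self' and arguments
    if name in frame.f_locals:
        return frame.f_locals[name]
    return frame.f_globals.get(name)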
def parseLeftAxiom(self):
    """ predicateList | true """
    tok = self.tokenizer
    if not tok.current():
        self.perror(lineNo(currentframe()) + " unexpected end of tokens ")
    if tok.current().ttype != 'WORD':
        self.perror(lineNo(currentframe()) + " expected token of type 'WORD' got '"
                    + str(tok.current().ttype) + "'")
    if tok.current() == 'true':
        tok.readnext()
        return SpecialFormula('true')
    else:
        formula = Formula()
        formula.append(self.parsePredicate())
        while 1:
            if not tok.current():
                self.perror(lineNo(currentframe()) + " unexpected end of tokens")
                break
            elif tok.current() == ',':
                # these are always separated by ',' so no need to store
                # the logical separator since formulas are geometric
                tok.readnext()
                formula.append(self.parsePredicate())
            elif tok.current() == '=>':
                break
            else:
                self.perror(lineNo(currentframe()) + " expected ',' or '=>' got '"
                            + tok.current().tvalue + "'")
                break
        return formula
def loop(self):
    """Main loop. Waits for new connections."""
    self.run = True
    while self.run:
        try:
            rlist, wlist, elist = select.select([self.slave_connection], [], [], SELECT_TIMEOUT)
            for connection in rlist:
                self.accept_new_slave_connection(connection)
            rlist, wlist, elist = select.select([self.cmd_connection], [], [], SELECT_TIMEOUT)
            for connection in rlist:
                self.accept_new_cmd_connection(connection)
        except KeyboardInterrupt as e:
            frameinfo = getframeinfo(currentframe())
            self.logger.info('in loop: Keyboard interrupt: leaving program')
            print("[ MASTER DAEMON " + frameinfo.filename + ":" +
                  str(frameinfo.lineno) + " ]: Keyboard Interrupt")
            self.stop()
            sys.exit(0)
        except ValueError as e:
            frameinfo = getframeinfo(currentframe())
            self.logger.error('in loop: Value error: ' + str(e))
            print("[ MASTER DAEMON " + frameinfo.filename + ":" +
                  str(frameinfo.lineno) + "]: Value Error")
            print(e)
def main():
    email = None
    password = None
    filepath = None
    convert = 'false'  # Convert to Google Docs format by default
    default_chunk_size = gdata.client.ResumableUploader.DEFAULT_CHUNK_SIZE
    chunk_size = default_chunk_size
    debug = False
    ssl = True

    try:
        opts, args = getopt.getopt(
            sys.argv[1:], '',
            ['email=', 'password='******'filepath=', 'convert',
             'chunk_size=', 'ssl', 'debug'])
    except getopt.error, msg:
        print 'python ' + inspect.getfile(inspect.currentframe()) + '''
    --email= [your Google Docs email]
    --password= [your Google Docs password]
    --filepath= [file to upload]
    --convert [converts uploaded file]
    --chunk_size= [size of upload chunks. default is ''' + str(default_chunk_size) + ''']
    --nossl [disables HTTPS if set]
    --debug [prints debug info if set]'''
        print ('Example usage: ' + inspect.getfile(inspect.currentframe()) + ' '
               '--filepath=/path/to/test.doc --convert --nossl')
        sys.exit(2)
def open_or_queue_valve(self, valve):
    """
    Opens a valve if no other valves are opened, else it will be added to the queue
    :type valve: MagneticValve
    """
    if isinstance(valve, MagneticValve):
        print_debug(self.debug, currentframe().f_code.co_name,
                    'Got opening event from valve: {name}'.format(name=valve.get_name),
                    __name__)

        # Open the valve if no valve is opened
        # Else, add the valve to the queue
        if self.__current_opened_valve is None:
            print_debug(self.debug, currentframe().f_code.co_name,
                        'Will open valve: {name}'.format(name=valve.get_name),
                        __name__)
            self.__current_opened_valve = valve

            # Open the plant's pump before we open any valves!
            # If the pump is open we will just open the valve,
            # else we must wait for the open-event
            pump = pumps.get(valve.get_pump_id)
            self.__current_opened_pump = pump
            if not pump.is_opened:
                pump.turn_on_pump(valve)
            else:
                self.__open_current(valve)
        else:
            self.add_valve_to_queue(valve)
    else:
        # raise a fresh exception instead of mutating the TypeError class
        raise TypeError('valve needs to be of the type MagneticValve!')
def upload_files2(cur_dir_src, root_dir_dst):
    dir_path_dst = get_ubuntu_path(cur_dir_src, root_dir_dst)

    # 01 get files and dirs lists ====================
    list_all = os.listdir(cur_dir_src)
    #debug
    # print list_all
    # print "cur_dir_src", cur_dir_src
    # print inspect.currentframe().f_lineno; sys.exit()

    allocate_list2(list_all, cur_dir_src)
    # list_files, list_dirs = allocate_list2(list_all, cur_dir_src)
    print "OK."
    #debug (leftover debug stop, kept commented so the upload can proceed)
    # print inspect.currentframe().f_lineno; sys.exit()

    # 02 get ssh ================
    username = "******"
    passwd = "5n6WW09Y"
    ssh = get_ssh(username, passwd)

    # get ubuntu path ================
    # root_dir_dst = "/var/www/cgi-bin"
    new_path = get_ubuntu_path(cur_dir_src, root_dir_dst)

    # 03 create dirs in the remote ================
    # use the destination path computed above (cur_dir_dst was undefined)
    res = makedir(ssh, new_path)
    if res == 1:
        print "Dir created: %s" % new_path
    else:
        print res
def ctx(action, fields=None):
    """Create a context with current logger"""
    push_log(inspect.currentframe().f_back, action, fields)
    start()
    yield
    end()
    pop_log(inspect.currentframe().f_back)
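For this generator to drive a with-block it has to be wrapped, presumably with contextlib.contextmanager (an assumption; the snippet does not show the wrapping). Note the caveat: once wrapped, currentframe().f_back inside the generator body points into the contextlib machinery rather than the user's frame, which may be why the project keeps ctx as a bare generator. An assumed usage sketch (do_work is hypothetical):

from contextlib import contextmanager

logged_ctx = contextmanager(ctx)   # assumption about how the project wraps it

with logged_ctx('fetch-users', fields={'shard': 1}):
    do_work()   # hypothetical workload executed inside the logging context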
def parseRightAxiom(self):
    """ predicateList | goal | false """
    tok = self.tokenizer
    curToken = tok.current()
    if not curToken:
        self.perror(lineNo(currentframe()) + " unexpected end of tokens ")
    if curToken.ttype != 'WORD':
        self.perror(lineNo(currentframe()) + " expected token of type 'WORD' got '"
                    + str(curToken.ttype) + "'")
    if curToken.tvalue in ['false', 'goal']:
        tok.readnext()
        return SpecialFormula(curToken.tvalue)
    else:
        formula = Formula()
        formula.append(self.parsePredicate())
        while 1:
            if not tok.current():
                self.perror(lineNo(currentframe()) + " unexpected end of tokens")
                break
            elif tok.current().tvalue in [',', ';']:
                formula.append(tok.current().tvalue)
                tok.readnext()
                formula.append(self.parsePredicate())
            elif tok.current() == '.':
                break
            else:
                self.perror(lineNo(currentframe()) + " expected ',' or '.' got '"
                            + tok.current().tvalue + "'")
                break
        return formula
def test_user_reset_password_with_an_unverified_email_address_14789(self):
    """Reset password with an unverified email address.

    Steps:
    Go to https://tutor-qa.openstax.org/
    Click on the 'Login' button
    Enter any user account in the username and password text boxes
    Click on the 'Sign in' button
    If the user has more than one course, click on a Tutor course name
    Click "My Account" from the user menu
    Click on the pencil next to the password
    Enter a new password
    Click the checkmark

    Expected Result:
    The user is presented with the message that confirms password change
    """
    self.ps.test_updates['name'] = 't2.09.025' \
        + inspect.currentframe().f_code.co_name[4:]
    self.ps.test_updates['tags'] = [
        't2', 't2.09', 't2.09.025', '14789'
    ]
    self.ps.test_updates['passed'] = False

    # Test steps and verification assertions
    raise NotImplementedError(inspect.currentframe().f_code.co_name)

    self.ps.test_updates['passed'] = True
def __init__(self, stream):
    """Do not directly call this method, it is called automatically.
    Use stream.available()."""
    self.stream = stream
    self.filename = getsourcefile(currentframe().f_back)
    self.lineno = currentframe().f_back.f_lineno
    _test_stream(stream, self)
def getWSDLPath(filename):
    lis = inspect.getabsfile(inspect.currentframe()).split("/")
    st = "/"
    for l in lis[:-1]:
        st = os.path.join(st, l)
    return os.path.join(st, "wsdl", filename)
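The split/join loop above just rebuilds the directory of the defining file. An equivalent, shorter form (a sketch, not the project's code):

import os
import inspect

def getWSDLPath(filename):
    # directory of this source file, plus wsdl/<filename>
    here = os.path.dirname(inspect.getabsfile(inspect.currentframe()))
    return os.path.join(here, "wsdl", filename)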
import os
import copy
import inspect

configurations = os.path.realpath(inspect.getfile(inspect.currentframe()))  # this file
configurations = os.path.dirname(configurations)  # ggH2016
configurations = os.path.dirname(configurations)  # Differential
configurations = os.path.dirname(configurations)  # Configurations

#aliases = {}

# imported from samples.py:
# samples, signals

mc = [skey for skey in samples if skey not in ('Fake', 'DATA')]

eleWP = 'mvaFall17V1Iso_WP90_tthmva_70'
muWP = 'cut_Tight_HWWW_tthmva_80'
eleWP_old = 'mvaFall17V1Iso_WP90'
muWP_old = 'cut_Tight_HWWW'

aliases['LepWPCut'] = {
    'expr': 'LepCut2l__ele_mvaFall17V1Iso_WP90__mu_cut_Tight_HWWW*( (abs(Lepton_pdgId[0])==11 || Muon_mvaTTH[Lepton_muonIdx[0]]>0.8) && (abs(Lepton_pdgId[1])==11 || Muon_mvaTTH[Lepton_muonIdx[1]]>0.8) && (abs(Lepton_pdgId[0])==13 || Electron_mvaTTH[Lepton_electronIdx[0]]>0.70) && (abs(Lepton_pdgId[1])==13 || Electron_mvaTTH[Lepton_electronIdx[1]]>0.70))',
    'samples': mc + ['DATA']
}

aliases['gstarLow'] = {
    'expr': 'Gen_ZGstar_mass >0 && Gen_ZGstar_mass < 4',
    'samples': 'WgS'
}
def test_called_by_current_file(self):
    frame = inspect.currentframe()
    file = Path(__file__)
    assert was_called_by(frame, file)
def realigner(in_bam, out_dir, max_hits=100, max_tags=-1,
              read_tagger_method='median', winsize=50, unstranded=False,
              retag=False):
    """The main entry for CLAM-realigner.

    Args:
        in_bam (str): filepath for input bam
        out_dir (str): filepath for CLAM output folder
        max_hits (int): maximum number of aligned loci allowed for mreads
        max_tags (int): maximum number of identical alignments allowed for
            each genomic locus; any extra are collapsed; -1 is no collapsing
        read_tagger_method (str): the tagger function type
        winsize (int): window size
        unstranded (bool): ignore alignment strand info if turned on
        retag (bool): force a call to `preprocessor` on `in_bam` if turned on

    Returns:
        None
    """
    # logging the parameter values
    frame = inspect.currentframe()
    args, _, _, values = inspect.getargvalues(frame)
    msg = 'Params:\n'
    for i in args:
        msg += "%s = %s \n" % (i, values[i])
    logger.info(msg)

    # preprocessing
    if retag or not (
            os.path.isfile(os.path.join(out_dir, 'unique.sorted.bam')) and
            os.path.isfile(os.path.join(out_dir, 'multi.sorted.bam'))):
        filter_bam_multihits(in_bam, max_tags=max_tags, max_hits=max_hits,
                             out_dir=out_dir,
                             read_tagger_method=read_tagger_method,
                             omit_detail=True)
    else:
        logger.info("found existing bams; skipped tagging.")

    # file handlers
    if max_tags > 0:
        mbam = pysam.Samfile(os.path.join(out_dir, 'multi.sorted.collapsed.bam'), 'rb')
        ubam = pysam.Samfile(os.path.join(out_dir, 'unique.sorted.collapsed.bam'), 'rb')
    else:
        mbam = pysam.Samfile(os.path.join(out_dir, 'multi.sorted.bam'), 'rb')
        ubam = pysam.Samfile(os.path.join(out_dir, 'unique.sorted.bam'), 'rb')
    obam = pysam.Samfile(os.path.join(out_dir, 'realigned.bam'), 'wb', template=mbam)
    chr_list = [x['SN'] for x in ubam.header['SQ']]
    chr_size = [x['LN'] for x in mbam.header['SQ']]
    chr_dict = {'name': chr_list, 'size': chr_size}

    # construct the `mread_dict`, this will be needed throughout the program;
    # also construct the genomic cluster dict and cluster-to-alignment map,
    # by going through all mreads at once
    genomic_cluster_dict, mread_dict, location_to_reads = \
        get_genomic_clusters(mbam, winsize=winsize, unstranded=unstranded)
    logger.debug('found %i mreads @ %i locations' %
                 (len(mread_dict), len(location_to_reads)))

    # keep a record of processed reads
    processed_mreads = set()

    # iterate through all mreads
    logger.info('running em')
    subg_counter = 0
    for read_qname in mread_dict:
        if read_qname in processed_mreads:
            continue

        ## construct the fully-connected subgraph for each read
        read_to_locations, processed_mreads = \
            construct_subgraph(location_to_reads, read_qname, mread_dict,
                               processed_mreads, chr_dict,
                               genomic_cluster_dict, winsize=winsize,
                               unstranded=unstranded)
        subgraph = set()
        for read in read_to_locations:
            _ = map(subgraph.add, read_to_locations[read].keys())
        subgraph = list(subgraph)
        #if len(subgraph) == 1 and len(read_to_locations) > 10:
        #    raise Exception('Incorrect mread assigned to one location')
        if len(subgraph) == 0:
            continue
        subg_counter += 1
        logger.debug("subgraph %i: |e|=%i, |v|=%i" %
                     (subg_counter, len(read_to_locations), len(subgraph)))

        ## build the BIT tracks
        node_track, multi_reads_weights = \
            construct_BIT_track(subgraph, read_to_locations, ubam, unstranded)

        ## run EM
        multi_reads_weights = \
            run_EM(node_track, multi_reads_weights, w=winsize)

        ## write to obam
        for read in multi_reads_weights:
            for node in multi_reads_weights[read]:
                alignment = read_to_locations[read][node]
                score = round(multi_reads_weights[read][node][0], 3)
                alignment.set_tag('AS', score)
                #alignment.set_tag('PG', 'CLAM')
                obam.write(alignment)

    # sort the final output
    logger.info('sorting output')
    obam.close()
    ubam.close()
    mbam.close()
    obam_sorted_fn = os.path.join(out_dir, 'realigned.sorted.bam')
    pysam.sort('-o', obam_sorted_fn, os.path.join(out_dir, 'realigned.bam'))
    pysam.index(obam_sorted_fn)
    os.remove(os.path.join(out_dir, 'realigned.bam'))
    return
# "Optimizing Block Matrix Multiplication". # import opentuner from opentuner import ConfigurationManipulator from opentuner import IntegerParameter from opentuner import MeasurementInterface from opentuner import Result import inspect import time import math import sys import os import re try: here = os.path.dirname(inspect.getfile(inspect.currentframe())) scripts = os.path.realpath(os.path.join(here, "..", "..", "scripts")) if scripts not in sys.path: sys.path.insert(0, scripts) import libxsmm_utilities except ImportError: pass class XgemmTuner(MeasurementInterface): def manipulator(self): """ Define the search space by creating a ConfigurationManipulator """ self.granularity = 1
import os
import copy
import inspect

configurations = os.path.realpath(inspect.getfile(inspect.currentframe()))  # this file
configurations = os.path.dirname(configurations)  # Full2017_v6
configurations = os.path.dirname(configurations)  # ggH
configurations = os.path.dirname(configurations)  # Configurations
configurations = os.path.dirname(configurations)

#aliases = {}

# imported from samples.py:
# samples, signals

mc = [skey for skey in samples if skey not in ('Fake', 'DATA')]

DH = [
    skey for skey in samples
    if skey not in ('Fake', 'DATA', 'DY', 'top', 'WW', 'WWewk', 'ggWW',
                    'Vg', 'VgS', 'VZ', 'VVV', 'Higgs')
]

eleWP = 'mvaFall17V1Iso_WP90'
muWP = 'cut_Tight_HWWW'

aliases['LepWPCut'] = {
    'expr': 'LepCut2l__ele_' + eleWP + '__mu_' + muWP,
    'samples': mc + ['DATA']
}

aliases['LepWPSF'] = {
# imports used by this config excerpt
import os
import inspect
from os.path import expanduser

# Default number of threads to use when parallel computing
# __NTHREADS__ = 6
__NTHREADS__ = 1

# library directory
beast_envvar = "BEAST_LIBS"
userhome = expanduser("~")
ploc = userhome + "/.beast/"
if beast_envvar in os.environ:
    __ROOT__ = os.environ[beast_envvar]
elif os.path.isdir(ploc):
    __ROOT__ = ploc
else:
    __ROOT__ = "/".join(
        os.path.abspath(inspect.getfile(inspect.currentframe())).split("/")[:-1])
    __ROOT__ += "/libs/"

# Online libraries
# will be replaced by a more flexible support (JSON is easy!)
libs_server = "http://www.stsci.edu/~kgordon/beast/"
libs = dict(
    vega="vega.hd5",
    filters="filters.hd5",
    kurucz04="kurucz2004.grid.fits",
    tlusty09="tlusty.lowres.grid.fits",
    hstcovar="hst_whitedwarf_frac_covar.fits",
    basel22="stellib_BaSeL_v2.2.grid.fits",
    munari="atlas9-munari.hires.grid.fits",
    btsettl="bt-settl.lowres.grid.fits"
    # elodie31 = 'Elodie_v3.1.grid.fits'
)
import os, glob, shutil, psutil, inspect, random
import scipy.misc
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from datetime import datetime

PACK_PATH = os.path.dirname(
    os.path.abspath(inspect.getfile(inspect.currentframe()))) + "/.."


def check_path(path):
    if os.path.exists(path):
        return True
    else:
        return False


def check_file(filename):
    if os.path.isfile(filename):
        return True
    else:
        return False


def check_memory():
import random
import string
import logging
import os
import subprocess
import shutil
import sys
import inspect
from urllib.parse import urlparse

currentdir = os.path.dirname(os.path.abspath(
    inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)

from util.stringhelper import StringHelper

logger = logging.getLogger(__name__)


class MysqlHelper():
    """
    /Users/lcz>mysqldump -uroot -h47.99.73.225 -p123456 db1 > db1.sql
    mysqldump: [Warning] Using a password on the command line interface can be insecure.
    /Users/lcz>mysql -uroot -h47.99.73.225 -p123456 db2 < db1.sql
    mysql: [Warning] Using a password on the command line interface can be insecure.
    """

    @staticmethod
    def sample():
        return 'mysql://*****:*****@47.99.73.225/db1'
parser.add_argument('-d', '--debug', action='store_true', help='run in pudb')
parser.add_argument("url", nargs="?", default="http://127.0.0.1:8080",
                    help="url of the Floodlight controller")
args = parser.parse_args()

if args.debug:
    import pudb
    pudb.set_trace()


def _test_failed(msg, lineno):
    print('TEST FAILED at line {}: '.format(lineno), msg)


url = args.url
failcnt = 0

result = list_rules(url)
if not isinstance(result.get("firewall_rules"), list):
    _test_failed(result, getframeinfo(currentframe()).lineno)
    failcnt = failcnt + 1

# Delete all pre-existing rules so we know the controller's state
for r in result["firewall_rules"]:
    result = delete_rule(url, None, r)
    if not result["success"]:
        _test_failed(result, getframeinfo(currentframe()).lineno)
        failcnt = failcnt + 1

result = add_rule(url, 'deny', {})
if not result["success"]:
    _test_failed(result, getframeinfo(currentframe()).lineno)
    failcnt = failcnt + 1

result = list_rules(url)
def getScriptDir():
    return os.path.dirname(
        os.path.abspath(inspect.getfile(inspect.currentframe())))
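Unlike os.getcwd(), this returns the directory of the file that defines getScriptDir, so it stays stable no matter where the process was launched from. A short usage sketch (the 'data/config.yaml' resource is hypothetical):

import os

# resolve a resource shipped next to the script, independent of the
# current working directory
config_path = os.path.join(getScriptDir(), 'data', 'config.yaml')
print(config_path)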
def define_plugin_entries(groups):
    """
    Helper to define entry points for all plugin groups.
    """
    result = dict()
    for group, modules in groups:
        tempo = []
        for module_name, names in modules:
            tempo.extend([define_plugin_entry(name, module_name)
                          for name in names])
        result[group] = tempo
    return result


__file__ = inspect.getframeinfo(inspect.currentframe()).filename

with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as long_d_f:
    long_description = long_d_f.read()

setup_args = {
    'name': "buildbot",
    'version': version,
    'description': "The Continuous Integration Framework",
    'long_description': long_description,
    'author': "Brian Warner",
    'author_email': "*****@*****.**",
    'maintainer': "Dustin J. Mitchell",
    'maintainer_email': "*****@*****.**",
    'url': "http://buildbot.net/",
    'license': "GNU GPL",
'''
This script uses only the weather data, for we are assuming heatload is
unknown and depends strongly on weather data input.
'''

## General imports
import numpy as np
import pandas as pd
import os, inspect

# Get this current script file's directory:
loc = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
# Set working directory
os.chdir(loc)

from myFunctions import gen_FTN_data
from meSAX import *
# from dtw_featurespace import *
# from dtw import dtw
# from fastdtw import fastdtw

# to avoid tk crash
import matplotlib
matplotlib.use('Qt5Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

## Load data
# Selected EVs
EVs = ['WeatherData.y', 'RoomLoadData.y']
def debug(session=0):
    """
    Start debugging with fmbt-debug from the point where this function
    was called. Execution will stop until connection to fmbt-debug
    [session] has been established.

    Parameters:

      session (integer, optional):
              debug session that identifies which fmbt-debug should
              connect to this process. The default is 0.

    Example:

      - execute on command line "fmbt-debug 42"
      - add fmbt.debug(42) in your Python code
      - run the Python code so that it will call fmbt.debug(42)
      - when done the debugging on the fmbt-debug prompt, enter "c"
        for continue.
    """
    import bdb
    import inspect
    import pdb
    import socket

    global _g_debug_conn, _g_debug_socket

    if not _g_debug_socket:
        PORTBASE = 0xf4bd  # 62653, fMBD
        host = "127.0.0.1"  # accept local host only, by default
        port = PORTBASE + session
        _g_debug_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            _g_debug_socket.bind((host, port))
            _g_debug_socket.listen(1)
            while True:
                (_g_debug_conn, addr) = _g_debug_socket.accept()
                _g_debug_conn.sendall("fmbt.debug\n")
                msg = _g_debug_conn.recv(len("fmbt-debug\n"))
                if msg.startswith("fmbt-debug"):
                    break
                _g_debug_conn.close()
        except socket.error:
            # already in use, perhaps fmbt-debug is already listening to
            # the socket and waiting for this process to connect
            try:
                _g_debug_socket.connect((host, port))
                _g_debug_conn = _g_debug_socket
                whos_there = _g_debug_conn.recv(len("fmbt-debug\n"))
                if not whos_there.startswith("fmbt-debug"):
                    _g_debug_conn.close()
                    _g_debug_socket = None
                    _g_debug_conn = None
                    raise ValueError(
                        'unexpected answer "%s", fmbt-debug expected' %
                        (whos_there.strip(),))
                _g_debug_conn.sendall("fmbt.debug\n")
            except socket.error:
                raise ValueError('debugger cannot listen or connect to %s:%s' %
                                 (host, port))

    if not _g_debug_conn:
        fmbtlog("debugger waiting for connection at %s:%s" % (host, port))

    # socket.makefile does not work due to buffering issues;
    # therefore, use our own socket-to-file converter
    class SocketToFile(object):
        def __init__(self, socket_conn):
            self._conn = socket_conn

        def read(self, bytes=-1):
            msg = []
            rv = ""
            try:
                c = self._conn.recv(1)
            except KeyboardInterrupt:
                self._conn.close()
                raise
            while c and not rv:
                msg.append(c)
                if c == "\r":
                    rv = "".join(msg)
                elif c == "\n":
                    rv = "".join(msg)
                elif len(msg) == bytes:
                    rv = "".join(msg)
                else:
                    c = self._conn.recv(1)
            return rv

        def readline(self):
            return self.read()

        def write(self, msg):
            self._conn.sendall(msg)

        def flush(self):
            pass

    connfile = SocketToFile(_g_debug_conn)
    debugger = pdb.Pdb(stdin=connfile, stdout=connfile)
    debugger.set_trace(inspect.currentframe().f_back)
def api_instruction():
    """ Instruction for annotators """
    project = project_get_or_create()
    project.analytics.send(getframeinfo(currentframe()).function)
    return make_response(project.config['instruction'], 200)
def test_objecttypes(self):
    # check all types defined in Objects/
    h = self.header
    vh = self.vheader
    size = self.calcsize
    check = self.check_sizeof
    # bool
    check(True, size(h + 'l'))
    # buffer
    with test.test_support.check_py3k_warnings():
        check(buffer(''), size(h + '2P2Pil'))
    # builtin_function_or_method
    check(len, size(h + '3P'))
    # bytearray
    samples = ['', 'u' * 100000]
    for sample in samples:
        x = bytearray(sample)
        check(x, size(vh + 'iPP') + x.__alloc__() * self.c)
    # bytearray_iterator
    check(iter(bytearray()), size(h + 'PP'))
    # cell
    def get_cell():
        x = 42
        def inner():
            return x
        return inner
    check(get_cell().func_closure[0], size(h + 'P'))
    # classobj (old-style class)
    class class_oldstyle():
        def method():
            pass
    check(class_oldstyle, size(h + '7P'))
    # instance (old-style class)
    check(class_oldstyle(), size(h + '3P'))
    # instancemethod (old-style class)
    check(class_oldstyle().method, size(h + '4P'))
    # complex
    check(complex(0, 1), size(h + '2d'))
    # code
    check(get_cell().func_code, size(h + '4i8Pi3P'))
    # BaseException
    check(BaseException(), size(h + '3P'))
    # UnicodeEncodeError
    check(UnicodeEncodeError("", u"", 0, 0, ""), size(h + '5P2PP'))
    # UnicodeDecodeError
    check(UnicodeDecodeError("", "", 0, 0, ""), size(h + '5P2PP'))
    # UnicodeTranslateError
    check(UnicodeTranslateError(u"", 0, 1, ""), size(h + '5P2PP'))
    # method_descriptor (descriptor object)
    check(str.lower, size(h + '2PP'))
    # classmethod_descriptor (descriptor object)
    # XXX
    # member_descriptor (descriptor object)
    import datetime
    check(datetime.timedelta.days, size(h + '2PP'))
    # getset_descriptor (descriptor object)
    import __builtin__
    check(__builtin__.file.closed, size(h + '2PP'))
    # wrapper_descriptor (descriptor object)
    check(int.__add__, size(h + '2P2P'))
    # dictproxy
    class C(object):
        pass
    check(C.__dict__, size(h + 'P'))
    # method-wrapper (descriptor object)
    check({}.__iter__, size(h + '2P'))
    # dict
    check({}, size(h + '3P2P' + 8 * 'P2P'))
    x = {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8}
    check(x, size(h + '3P2P' + 8 * 'P2P') + 16 * size('P2P'))
    # dictionary-keyiterator
    check({}.iterkeys(), size(h + 'P2PPP'))
    # dictionary-valueiterator
    check({}.itervalues(), size(h + 'P2PPP'))
    # dictionary-itemiterator
    check({}.iteritems(), size(h + 'P2PPP'))
    # ellipses
    check(Ellipsis, size(h + ''))
    # EncodingMap
    import codecs, encodings.iso8859_3
    x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
    check(x, size(h + '32B2iB'))
    # enumerate
    check(enumerate([]), size(h + 'l3P'))
    # file
    check(self.file, size(h + '4P2i4P3i3P3i'))
    # float
    check(float(0), size(h + 'd'))
    # sys.floatinfo
    check(sys.float_info, size(vh) + self.P * len(sys.float_info))
    # frame
    import inspect
    CO_MAXBLOCKS = 20
    x = inspect.currentframe()
    ncells = len(x.f_code.co_cellvars)
    nfrees = len(x.f_code.co_freevars)
    extras = x.f_code.co_stacksize + x.f_code.co_nlocals + \
        ncells + nfrees - 1
    check(x, size(vh + '12P3i' + CO_MAXBLOCKS * '3i' + 'P' + extras * 'P'))
    # function
    def func():
        pass
    check(func, size(h + '9P'))
    class c():
        @staticmethod
        def foo():
            pass
        @classmethod
        def bar(cls):
            pass
        # staticmethod
        check(foo, size(h + 'P'))
        # classmethod
        check(bar, size(h + 'P'))
    # generator
    def get_gen():
        yield 1
    check(get_gen(), size(h + 'Pi2P'))
    # integer
    check(1, size(h + 'l'))
    check(100, size(h + 'l'))
    # iterator
    check(iter('abc'), size(h + 'lP'))
    # callable-iterator
    import re
    check(re.finditer('', ''), size(h + '2P'))
    # list
    samples = [[], [1, 2, 3], ['1', '2', '3']]
    for sample in samples:
        check(sample, size(vh + 'PP') + len(sample) * self.P)
    # sortwrapper (list)
    # XXX
    # cmpwrapper (list)
    # XXX
    # listiterator (list)
    check(iter([]), size(h + 'lP'))
    # listreverseiterator (list)
    check(reversed([]), size(h + 'lP'))
    # long
    check(0L, size(vh))
    check(1L, size(vh) + self.longdigit)
    check(-1L, size(vh) + self.longdigit)
    PyLong_BASE = 2 ** sys.long_info.bits_per_digit
    check(long(PyLong_BASE), size(vh) + 2 * self.longdigit)
    check(long(PyLong_BASE ** 2 - 1), size(vh) + 2 * self.longdigit)
    check(long(PyLong_BASE ** 2), size(vh) + 3 * self.longdigit)
    # module
    check(unittest, size(h + 'P'))
    # None
    check(None, size(h + ''))
    # object
    check(object(), size(h + ''))
    # property (descriptor object)
    class C(object):
        def getx(self):
            return self.__x
        def setx(self, value):
            self.__x = value
        def delx(self):
            del self.__x
        x = property(getx, setx, delx, "")
        check(x, size(h + '4Pi'))
    # PyCObject
    # PyCapsule
    # XXX
    # rangeiterator
    check(iter(xrange(1)), size(h + '4l'))
    # reverse
    check(reversed(''), size(h + 'PP'))
    # set
    # frozenset
    PySet_MINSIZE = 8
    samples = [[], range(10), range(50)]
    s = size(h + '3P2P' + PySet_MINSIZE * 'lP' + 'lP')
    for sample in samples:
        minused = len(sample)
        if minused == 0:
            tmp = 1
        # the computation of minused is actually a bit more complicated
        # but this suffices for the sizeof test
        minused = minused * 2
        newsize = PySet_MINSIZE
        while newsize <= minused:
            newsize = newsize << 1
        if newsize <= 8:
            check(set(sample), s)
            check(frozenset(sample), s)
        else:
            check(set(sample), s + newsize * struct.calcsize('lP'))
            check(frozenset(sample), s + newsize * struct.calcsize('lP'))
    # setiterator
    check(iter(set()), size(h + 'P3P'))
    # slice
    check(slice(1), size(h + '3P'))
    # str
    check('', struct.calcsize(vh + 'li') + 1)
    check('abc', struct.calcsize(vh + 'li') + 1 + 3 * self.c)
    # super
    check(super(int), size(h + '3P'))
    # tuple
    check((), size(vh))
    check((1, 2, 3), size(vh) + 3 * self.P)
    # tupleiterator
    check(iter(()), size(h + 'lP'))
    # type
    # (PyTypeObject + PyNumberMethods + PyMappingMethods +
    #  PySequenceMethods + PyBufferProcs)
    s = size(vh + 'P2P15Pl4PP9PP11PI') + size('41P 10P 3P 6P')
    class newstyleclass(object):
        pass
    check(newstyleclass, s)
    # builtin type
    check(int, s)
    # NotImplementedType
    import types
    check(types.NotImplementedType, s)
    # unicode
    usize = len(u'\0'.encode('unicode-internal'))
    samples = [u'', u'1' * 100]
    # we need to test for both sizes, because we don't know if the string
    # has been cached
    for s in samples:
        check(s, size(h + 'PPlP') + usize * (len(s) + 1))
    # weakref
    import weakref
    check(weakref.ref(int), size(h + '2Pl2P'))
    # weakproxy
    # XXX
    # weakcallableproxy
    check(weakref.proxy(int), size(h + '2Pl2P'))
    # xrange
    check(xrange(1), size(h + '3l'))
    check(xrange(66000), size(h + '3l'))
import gc
from . import x86_const, arm64_const, unicorn_const as uc


def monkeypatch():
    # type: () -> None
    """
    If you call monkeypatch() before importing any other unicorn-based lib,
    it'll "just work". Any normal `import unicorn` will from now on return
    unicornafl. Good for 3rd party libs using unicorn. They won't even notice
    the difference - but they can now use the AFL forkserver.
    """
    sys.modules["unicorn"] = sys.modules["unicornafl"]


if not hasattr(sys.modules[__name__], "__file__"):
    __file__ = inspect.getfile(inspect.currentframe())

_python2 = sys.version_info[0] < 3
if _python2:
    range = xrange

_lib = {
    'darwin': 'libunicornafl.dylib',
    'win32': 'unicornafl.dll',
    'cygwin': 'cygunicornafl.dll',
    'linux': 'libunicornafl.so',
    'linux2': 'libunicornafl.so',
}

# Windows DLLs in dependency order
_all_windows_dlls = (
    "libwinpthread-1.dll",
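Per the docstring, the patch only works if it runs before the first `import unicorn` anywhere in the process. A minimal usage sketch:

import unicornafl
unicornafl.monkeypatch()   # must run before any `import unicorn`

import unicorn             # now actually resolves to unicornafl
# third-party code importing unicorn transparently gets the
# AFL-forkserver-enabled module as well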
def api_remove_ml_backend():
    project = project_get_or_create()
    ml_backend_name = request.json['name']
    project.remove_ml_backend(ml_backend_name)
    project.analytics.send(getframeinfo(currentframe()).function)
    return make_response(jsonify('Deleted!'), 204)
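Both API handlers above tag their analytics event with their own name via getframeinfo(currentframe()).function. The same idiom in isolation, moved into a reusable helper (the track() call is hypothetical):

from inspect import currentframe, getframeinfo

def current_function_name():
    # one frame up: the function that called this helper
    return getframeinfo(currentframe().f_back).function

def api_example():
    track(current_function_name())  # hypothetical analytics call -> "api_example"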
__author__ = 'Victor Olaya'
__date__ = 'July 2013'
__copyright__ = '(C) 2013, Victor Olaya'

# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '74f49ddd197ad05182f9e7ce59213663297f498e'

import os
import sys
import inspect

from processing.core.Processing import Processing
from exampleprovider.ExampleAlgorithmProvider import ExampleAlgorithmProvider

cmd_folder = os.path.split(inspect.getfile(inspect.currentframe()))[0]
if cmd_folder not in sys.path:
    sys.path.insert(0, cmd_folder)


class ProcessingExampleProviderPlugin(object):

    def __init__(self):
        self.provider = ExampleAlgorithmProvider()

    def initGui(self):
        Processing.addProvider(self.provider)

    def unload(self):
        Processing.removeProvider(self.provider)
import os
import inspect
import sys

# add this file's parent directory as one where we can import modules
src_dir = os.path.split(
    os.path.dirname(os.path.abspath(inspect.getfile(
        inspect.currentframe()))))[0]
sys.path.append(src_dir)
import pandas as pd
import glob
import os
import preprocessor as p
import numpy as np
import re
from analysis.tweetprocessor import preprocessTweets as pt
from analysis.tweetprocessor import processMoods as md
import inspect

fileDir = os.path.dirname(os.path.abspath(
    inspect.getfile(inspect.currentframe())))  # script directory
filePath = fileDir.rsplit('/', 1)[0]

allFiles = []
merge_path = filePath + '/train/raw/'
dirs = os.listdir(merge_path)
merge_path_ld = filePath + '/train/raw/party'
files = os.listdir(merge_path_ld)
allFiles.append(glob.glob(merge_path + "/*.csv"))
allFiles.append(glob.glob(merge_path_ld + "/*.csv"))

list_Modi = []
list_Rahul = []
isBJP = True
target_BJP = ['@narendramodi']
target_Cong = ['@rahulgandhi']

p.set_options(p.OPT.URL, p.OPT.EMOJI, p.OPT.HASHTAG, p.OPT.MENTION)
clean_BJP = []
def run(testfunc):
    """
    Generates a `unittest.TestSuite` out of a given function `testfunc` by
    creating `unittest.FunctionTestCase` tests out of all imported test stubs
    and passing them to a test runner.

    Using meta-programming, this function irreversibly replaces any test stub
    functions with dummy wrappers that generate `unittest.FunctionTestCase`
    cases to the actual stubs. Hence, it should only be used in __main__.

    :param testfunc: The test function to be called
    :rtype: Returns a tuple containing:
        - The number of passed testcases
        - The number of failed testcases
        - The number of skipped testcases
        - The logfile wherein the testrun's result summary is stored
    """
    # get caller frame's locals
    frame = currentframe()
    localdict = frame.f_back.f_locals

    # setup unittest
    runner = generate_test.runner = MufatTestRunner(stream=MufatLogger())
    suite = generate_test.suite = unittest.TestSuite()
    results = generate_test.results = []

    # run the tests
    runner.stream.writeln("**** Starting: %s ****" % testfunc.__name__)
    startTime = datetime.now()
    try:
        testfunc.__call__()
    except:
        print "**** ABORTED! ****\n", traceback.format_exc()
        raise
    finally:
        # parse collected results
        passed = [test for result in results for test in result.passed]
        failures = [test for result in results for test in result.failures]
        errors = [test for result in results for test in result.errors]
        skipped = list(set(suite._tests).difference(passed))

        # generate summary log file
        # logfile for storing run output text
        log_fd, log = mkstemp()
        with open(log, "w+") as f:
            print >> f, "time:", startTime.strftime("%m-%d-%Y, %H:%M:%S")
            print >> f, "passes:", len(passed)
            print >> f, "failures:", len(failures) + len(errors)
            print >> f, "untested:", len(skipped), "\n"
            passed_ = map(lambda t: t.id(), passed)
            # log stubs that were not run/failed
            for test in suite:
                print >> f, test.id()
                print >> f, getmodule(test._testFunc).__file__
                print >> f, (test.id() in passed_) and "1" or "0"
                print >> f, getattr(test, "timeTaken", 0), "\n"
        os.close(log_fd)

        localdict["passed"] = len(passed)
        localdict["failed"] = len(failures) + len(errors)
        localdict["skipped"] = len(skipped)
        localdict["logfile"] = log

    return len(passed), len(failures) + len(errors), len(skipped), log
def main(argv):
    global session

    atexit.register(exit_handler)

    args_file = sys.argv[1]
    args_data = open(args_file).read()

    vfid = None
    arguments = shlex.split(args_data)
    for arg in arguments:
        if "=" in arg:
            (key, value) = arg.split("=")
            if key == "switch_ip":
                ip_addr = value
            elif key == "user":
                user = value
            elif key == "password":
                password = value
            elif key == "checkmode":
                checkmode = ast.literal_eval(value)
            elif key == "name":
                name = value
            elif key == "user_friendly_name":
                user_friendly_name = value
            elif key == "vfid":
                vfid = ast.literal_eval(value)

    # print json.dumps({
    #     "ip_addr": ip_addr,
    #     "user": user,
    #     "password": password,
    #     "checkmode": checkmode
    # })

    session = pyfos_auth.login(user, password, ip_addr, isHttps)
    if pyfos_auth.is_failed_login(session):
        print(
            json.dumps({
                "changed": False,
                "login failed reason": session.get(
                    pyfos_auth.CREDENTIAL_KEY)[pyfos_auth.LOGIN_ERROR_KEY]
            }))
        sys.exit()

    if vfid is not None:
        pyfos_auth.vfid_set(session, vfid)

    changed = False
    return_str = "user friendly name remains"

    result = pyfos_switchfcport.fibrechannel.get(session, name)
    if pyfos_util.is_failed_resp(result):
        print(
            json.dumps({
                "changed": False,
                "line": inspect.currentframe().f_lineno,
                "error": result
            }))
        sys.exit()

    if result.peek_user_friendly_name() != user_friendly_name:
        changed = True
        return_str = "user friendly name changed to " + user_friendly_name
        if checkmode is False:
            port = pyfos_switchfcport.fibrechannel()
            port.set_name(name)
            port.set_user_friendly_name(user_friendly_name)
            result = port.patch(session)
            if pyfos_util.is_failed_resp(result):
                print((json.dumps({
                    "changed": False,
                    "line": inspect.currentframe().f_lineno,
                    "error": result
                })))
                sys.exit()

    pyfos_auth.logout(session)

    print(json.dumps({"changed": changed, "return_str": return_str}))
from tensorflow.python.lib.io import file_io as _fi

# Get site-packages directories for the python installation.
_site_packages_dirs = []
_site_packages_dirs += [_site.USER_SITE]
_site_packages_dirs += [_p for _p in _sys.path if 'site-packages' in _p]
if 'getsitepackages' in dir(_site):
    _site_packages_dirs += _site.getsitepackages()

if 'sysconfig' in dir(_distutils):
    _site_packages_dirs += [_distutils.sysconfig.get_python_lib()]

_site_packages_dirs = list(set(_site_packages_dirs))

# Find the location of this exact file.
_current_file_location = _inspect.getfile(_inspect.currentframe())


def _running_from_pip_package():
    return any(
        _current_file_location.startswith(dir_)
        for dir_ in _site_packages_dirs)


if _running_from_pip_package():
    for s in _site_packages_dirs:
        # TODO(gunan): Add sanity checks to loaded modules here.
        plugin_dir = _os.path.join(s, 'tensorflow-plugins')
        if _fi.file_exists(plugin_dir):
            _ll.load_library(plugin_dir)

# These symbols appear because we import the python package which
# in turn imports from tensorflow.core and tensorflow.python. They
# must come from this module. So python adds these symbols for the
def RemoveAndAddFeatures(self, url, pathToFeatureClass, id_field, chunksize=1000):
    fl = None
    try:
        if arcpyFound == False:
            raise common.ArcRestHelperError({
                "function": "RemoveAndAddFeatures",
                "line": inspect.currentframe().f_back.f_lineno,
                "filename": 'featureservicetools',
                "synerror": "ArcPy required for this function"
            })
        arcpy.env.overwriteOutput = True
        tempaddlayer = 'ewtdwedfew'
        if not arcpy.Exists(pathToFeatureClass):
            raise common.ArcRestHelperError({
                "function": "RemoveAndAddFeatures",
                "line": inspect.currentframe().f_back.f_lineno,
                "filename": 'featureservicetools',
                "synerror": "%s does not exist" % pathToFeatureClass
            })

        fields = arcpy.ListFields(pathToFeatureClass, wild_card=id_field)
        if len(fields) == 0:
            raise common.ArcRestHelperError({
                "function": "RemoveAndAddFeatures",
                "line": inspect.currentframe().f_back.f_lineno,
                "filename": 'featureservicetools',
                "synerror": "%s field does not exist" % id_field
            })
        strFld = True
        if fields[0].type != 'String':
            strFld = False

        fl = FeatureLayer(url=url, securityHandler=self._securityHandler)

        id_field_local = arcpy.AddFieldDelimiters(pathToFeatureClass, id_field)
        idlist = []
        print(arcpy.GetCount_management(
            in_rows=pathToFeatureClass).getOutput(0) + " features in the layer")
        with arcpy.da.SearchCursor(pathToFeatureClass, (id_field)) as cursor:
            allidlist = []
            for row in cursor:
                if strFld:
                    idlist.append("'" + row[0] + "'")
                else:
                    idlist.append(row[0])
                if len(idlist) >= chunksize:
                    allidlist.append(idlist)
                    idlist = []

            if len(idlist) > 0:
                allidlist.append(idlist)

            for idlist in allidlist:
                idstring = ' in (' + ','.join(idlist) + ')'
                sql = id_field + idstring
                sqlLocalFC = id_field_local + idstring
                results = fl.deleteFeatures(where=sql, rollbackOnFailure=True)
                if 'error' in results:
                    raise common.ArcRestHelperError({
                        "function": "RemoveAndAddFeatures",
                        "line": inspect.currentframe().f_back.f_lineno,
                        "filename": 'featureservicetools',
                        "synerror": results['error']
                    })
                elif 'deleteResults' in results:
                    print("%s features deleted" % len(results['deleteResults']))
                    for itm in results['deleteResults']:
                        if itm['success'] != True:
                            print(itm)
                else:
                    print(results)

                arcpy.MakeFeatureLayer_management(pathToFeatureClass,
                                                  tempaddlayer, sqlLocalFC)
                results = fl.addFeatures(fc=tempaddlayer)
                if 'error' in results:
                    raise common.ArcRestHelperError({
                        "function": "RemoveAndAddFeatures",
                        "line": inspect.currentframe().f_back.f_lineno,
                        "filename": 'featureservicetools',
                        "synerror": results['error']
                    })
                elif 'addResults' in results:
                    print("%s features added" % len(results['addResults']))
                    for itm in results['addResults']:
                        if itm['success'] != True:
                            print(itm)
                else:
                    print(results)
                idlist = []

        if 'error' in results:
            raise common.ArcRestHelperError({
                "function": "RemoveAndAddFeatures",
                "line": inspect.currentframe().f_back.f_lineno,
                "filename": 'featureservicetools',
                "synerror": results['error']
            })
        else:
            print(results)
    except arcpy.ExecuteError:
        line, filename, synerror = trace()
        raise common.ArcRestHelperError({
            "function": "create_report_layers_using_config",
            "line": line,
            "filename": filename,
            "synerror": synerror,
            "arcpyError": arcpy.GetMessages(2),
        })
    except:
        line, filename, synerror = trace()
        raise common.ArcRestHelperError({
            "function": "AddFeaturesToFeatureLayer",
            "line": line,
            "filename": filename,
            "synerror": synerror,
        })
    finally:
        gc.collect()
    rdr = csv.reader(f)
    angles = []
    for line in rdr:
        angles.append(int(line[0]))
    f.close()
    angles = list(set(angles))
    return angles


# Build the pickle/replay memory from the angle set (experience_angles.csv)
# gathered by experience_gathering.
if __name__ == '__main__':
    import inspect, os, csv, pickle
    # from multiprocessing import Pool
    from tensorflow.python.keras.applications.vgg16 import VGG16

    current_path = inspect.getfile(inspect.currentframe())
    current_dir = os.path.dirname(os.path.abspath(current_path))
    EXP_PATH = os.path.join(current_dir, "experiences_parNN_feed_thread")

    vgg16 = VGG16(weights='imagenet')
    replay_memory = init_replaymemory_all_levels(EXP_PATH, current_dir, vgg16)

    # with open(os.path.join(EXP_PATH, 'replay_memory_newAll'), 'wb') as f:
    #     pickle.dump(replay_memory, f)
    np.save('replay_memory_with_levels_0615', replay_memory)
    print('Done')

    # angles = [8,10,11,14,17,18,19,20,21,22,23,26,30,31,34,35,36,46,61,65,67,70,75]
    # current_path = inspect.getfile(inspect.currentframe())
    # current_dir = os.path.dirname(os.path.abspath(current_path))
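# Hedged sketch (not from the original script): the path-resolution idiom the
# __main__ block above relies on. inspect.getfile(inspect.currentframe())
# returns the path of the file currently executing, so data directories can be
# located relative to the script regardless of the working directory it is
# launched from. The directory name is taken from the snippet above.
import inspect
import os

current_path = inspect.getfile(inspect.currentframe())
current_dir = os.path.dirname(os.path.abspath(current_path))
exp_path = os.path.join(current_dir, "experiences_parNN_feed_thread")
print(exp_path)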
def validator_main(arg_vals=None, delete_rundata=True):
    """
    Main driver utility for running validator tests.

    arg_vals, if passed, should be an array of strings. Examples:
        1) arg_vals = ['container1'] - Use container1 to process test files
           from the filesystem.
        2) arg_vals = ['container1', '--db'] - Use container1 to process test
           documents from the database.
    When arg_vals is None, argparse parses the sys.argv list.

    Successful argument parsing initializes the system for the run; cleanup
    runs on exit. The return values of this main entry function are:
        0 - Success, tests executed.
        1 - Failure, test execution error.
        2 - Exception: missing config.ini, Mongo connection failure or http
            connection exception; the test execution could not be started or
            completed.
    """
    cmd_parser = argparse.ArgumentParser(
        "prancer",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog='''\
Example: prancer collection1
Runs the prancer framework based on the configuration files available in collection1 folder
''')
    cmd_parser.add_argument('-v', '--version', action='version',
                            version=("Prancer %s" % __version__),
                            help='Show prancer version')
    cmd_parser.add_argument(
        'container', metavar='collection', action='store',
        help='The name of the folder which contains the collection of files '
             'related to one scenario')
    cmd_parser.add_argument(
        '--db', action='store', default=None,
        choices=['NONE', 'SNAPSHOT', 'FULL'],
        help='''NONE - Database will not be used, all the files reside on the file system,
SNAPSHOT - Resource snapshots will be stored in the db, everything else will be on the file system,
FULL - tests, configurations, outputs and snapshots will be stored in the database''')
    cmd_parser.add_argument('--crawler', action='store_true', default=False,
                            help='Crawls and generates snapshot files only')
    cmd_parser.add_argument('--test', action='store', default=None,
                            help='Run a single test in NODB mode')
    cmd_parser.add_argument('--customer', action='store', default=None,
                            help='Customer name for config')
    cmd_parser.add_argument(
        '--connector', action='store', default=None,
        help='Specify the name of the connector to run from the collection')
    cmd_parser.add_argument(
        '--branch', action='store', default=None,
        help='Specify the name of the branch to populate snapshots, for the '
             'filesystem connector')

    args = cmd_parser.parse_args(arg_vals)
    retval = 2
    set_customer()
    cfg_error, config_ini = search_config_ini()
    if cfg_error:
        return retval

    if args.customer:
        set_customer(args.customer)

    if args.db:
        if args.db.upper() in DBVALUES:
            args.db = DBVALUES.index(args.db.upper())
        else:
            args.db = DBVALUES.index(SNAPSHOT)
    else:
        nodb = config_value(TESTS, DBTESTS)
        if nodb and nodb.upper() in DBVALUES:
            args.db = DBVALUES.index(nodb.upper())
        else:
            args.db = DBVALUES.index(SNAPSHOT)

    if args.test:
        args.db = DBVALUES.index(NONE)

    # If any database mode is selected, verify the Mongo connection up front.
    if args.db:
        # init_db returns the db connection handle and status; the handle is ignored.
        from processor.database.database import init_db, TIMEOUT
        _, db_init_res = init_db()
        if not db_init_res:
            msg = "Mongo DB connection timed out after %d ms, check the mongo server, exiting!....." % TIMEOUT
            console_log(msg, currentframe())
            return retval

    # Check the log directory and also check that it is writeable.
    from processor.logging.log_handler import init_logger, get_logdir, default_logging, add_file_logging
    fw_cfg = get_config_data(framework_config())
    log_writeable, logdir = get_logdir(fw_cfg, framework_dir())
    if not log_writeable:
        console_log('Logging directory(%s) is not writeable, exiting....' % logdir,
                    currentframe())
        return retval

    # All is well from this point; check that the container exists in the
    # configured directory.
    retval = 0
    logger = init_logger(args.db, framework_config())
    # logger = add_file_logging(config_ini)
    logger.info("START: Argument parsing and Run Initialization. Version %s",
                __version__)

    from processor.connector.snapshot import populate_container_snapshots
    from processor.connector.validation import run_container_validation_tests
    from processor.crawler.master_snapshot import generate_container_mastersnapshots
    try:
        from processor_enterprise.notifications.notification import check_send_notification
    except ImportError:
        check_send_notification = lambda container, db: None

    logger.info("Command: '%s %s'",
                sys.executable.rsplit('/', 1)[-1], ' '.join(sys.argv))
    try:
        from processor.helper.config.rundata_utils import init_currentdata, \
            delete_currentdata, put_in_currentdata
        # Delete the rundata at the end of the script if the caller asked for
        # it; the default is True.
        if delete_rundata:
            atexit.register(delete_currentdata)
        init_currentdata()

        logger.info("Using Framework dir: %s", framework_dir())
        logger.info("Args: %s", args)
        logger.debug("Running tests from %s.", DBVALUES[args.db])
        fs = args.db > DBVALUES.index(SNAPSHOT)
        put_in_currentdata('jsonsource', fs)
        put_in_currentdata(DBTESTS, args.db)
        put_in_currentdata('container', args.container)
        # if args.db == DBVALUES.index(FULL):
        #     from processor.logging.log_handler import get_dblogger
        #     log_name = get_dblogger()
        #     if log_name:
        #         pid = open('/tmp/pid_%s' % os.getpid(), 'w')
        #         pid.write(log_name)
        #         pid.close()
        if args.customer:
            put_in_currentdata(CUSTOMER, args.customer)
        if args.test:
            put_in_currentdata(SINGLETEST, args.test)
        else:
            put_in_currentdata(SINGLETEST, False)
        if args.connector:
            put_in_currentdata("connector", args.connector)
        if args.branch:
            put_in_currentdata("branch", args.branch)
        if not args.db:
            retval = 0 if container_exists(args.container) else 2
            if retval:
                logger.critical("Container(%s) is not present in Framework dir: %s",
                                args.container, framework_dir(),
                                extra={"type": "critical"})
                # TODO: Log the path the framework looked for.
                return retval
        if args.crawler:
            # Generate snapshot files from here.
            generate_container_mastersnapshots(args.container, fs)
        else:
            # Normal flow: populate snapshots, then run the validation tests.
            snapshot_status = populate_container_snapshots(args.container, fs)
            logger.debug(json.dumps(snapshot_status, indent=2))
            if snapshot_status:
                status = run_container_validation_tests(args.container, fs,
                                                        snapshot_status)
                retval = 0 if status else 1
            else:
                retval = 1
            check_send_notification(args.container, args.db)
    except (Exception, KeyboardInterrupt) as ex:
        logger.error("Execution exception: %s", ex)
        print(traceback.format_exc())
        retval = 2
    return retval
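# Hedged usage sketch (not from the prancer sources): validator_main accepts
# its CLI arguments as a list of strings, so it can be driven from code as well
# as from the shell. The collection name 'container1' is the one used in the
# docstring examples; the exit codes follow the contract documented above
# (0 success, 1 test failure, 2 setup error or exception).
import sys

if __name__ == '__main__':
    retval = validator_main(['container1', '--db', 'NONE'])
    sys.exit(retval)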
def le(msg):
    # Get the real line number and function name of the caller, i.e. where
    # this message originated.
    lineno = str(inspect.getframeinfo(inspect.currentframe().f_back)[1])
    func = inspect.getframeinfo(inspect.currentframe().f_back)[2]
    logger.error(msg, extra={'true_lineno': lineno, 'true_func': func})
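# Hedged sketch (assumes a module-level `logger` like the one le() uses): a
# formatter that consumes the extra 'true_func'/'true_lineno' fields, so the
# log line shows where le() was called rather than le() itself.
import inspect
import logging

logging.basicConfig(
    format='%(asctime)s %(true_func)s:%(true_lineno)s %(message)s')
logger = logging.getLogger(__name__)


def caller_demo():
    le("something went wrong")  # logs caller_demo's own line number


caller_demo()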
def input(in_msg):

    import inspect
    in_msg.input_file = inspect.getfile(inspect.currentframe())
    print("*** read input from ", in_msg.input_file)

    in_msg.sat = "cosmo"
    in_msg.instrument = "cosmo"
    in_msg.sat_nr = "1"
    in_msg.RSS = False  # better: determine RSS automatically

    # if the forecast is not yet ready, the model run before is used
    in_msg.delay = 0

    if False:
        # offline mode (always a fixed time); ignores command line arguments
        year = 2015
        month = 2
        day = 10
        hour = 11
        minute = 45
        in_msg.update_datetime(year, month, day, hour, minute)
        # !!! if the archive is used, adjust meteosat09.cfg accordingly !!!

    #----------------------
    # choose RGBs
    #----------------------

    #-------------------
    # choose RGB mode
    #-------------------

    ## satellite channels
    ##in_msg.RGBs.append('VIS006')   # black and white
    ##in_msg.RGBs.append('VIS008')   # black and white
    ##in_msg.RGBs.append('IR_016')   # black and white
    ##in_msg.RGBs.append('IR_039')   # black and white
    ##in_msg.RGBs.append('WV_062')   # black and white
    ##in_msg.RGBs.append('WV_073')   # black and white
    ##in_msg.RGBs.append('IR_087')   # black and white
    ##in_msg.RGBs.append('IR_097')   # black and white
    ##in_msg.RGBs.append('IR_108')   # black and white
    ##in_msg.RGBs.append('IR_120')   # black and white
    ##in_msg.RGBs.append('IR_134')   # black and white
    ##in_msg.RGBs.append('HRV')      # black and white
    #in_msg.RGBs.append('VIS006c')   # colored version
    #in_msg.RGBs.append('VIS008c')   # colored version
    #in_msg.RGBs.append('IR_016c')   # colored version
    #in_msg.RGBs.append('IR_039c')   # colored version
    #in_msg.RGBs.append('WV_062c')   # colored version
    #in_msg.RGBs.append('WV_073c')   # colored version
    #in_msg.RGBs.append('IR_087c')   # colored version
    #in_msg.RGBs.append('IR_097c')   # colored version
    #in_msg.RGBs.append('IR_108c')   # colored version
    #in_msg.RGBs.append('IR_120c')   # colored version
    #in_msg.RGBs.append('IR_134c')   # colored version
    #in_msg.RGBs.append('HRVc')      # colored version

    #-------------------
    # viewing geometry
    #-------------------
    #in_msg.sat = "vza"
    #in_msg.sat = "vaa"

    # satellite channel differences
    #in_msg.RGBs.append('WV_062-WV_073')
    #in_msg.RGBs.append('WV_062-IR_108')
    #in_msg.RGBs.append('WV_073-IR_134')
    #in_msg.RGBs.append('IR_087-IR_108')
    #in_msg.RGBs.append('IR_039-IR_108')
    #in_msg.RGBs.append('IR_120-IR_108')
    #in_msg.RGBs.append('IR_087-IR_120')
    #in_msg.RGBs.append('IR_120-IR_108')
    #in_msg.RGBs.append('trichannel')

    #-------------------
    # viewing geometry
    #-------------------
    #in_msg.RGBs.append('vza')  # known bug: can't be displayed for the original projection, e.g. met09globeFull
    #in_msg.RGBs.append('vaa')
    #in_msg.RGBs.append('lat')
    #in_msg.RGBs.append('lon')

    #-------------------
    # built-in RGBs, see http://mpop.readthedocs.org/en/latest/pp.html
    # or http://oiswww.eumetsat.int/~idds/html/doc/best_practices.pdf
    #-------------------
    #                                    RED            GREEN          BLUE
    #in_msg.RGBs.append('airmass')     # WV_062-WV_073  IR_097-IR_108  -WV_062
    #in_msg.RGBs.append('ash')
    #in_msg.RGBs.append('cloudtop')
    #in_msg.RGBs.append('convection')  # WV_062-WV_073  IR_039-IR_108  IR_016-VIS006
    ##in_msg.RGBs.append('convection_co2')
    ##in_msg.RGBs.append('day_microphysics')  # VIS008 IR_039(solar) IR_108, requires the pyspectral module
    #in_msg.RGBs.append('dust')        # IR_120-IR_108  IR_108-IR_087  IR_108
    #in_msg.RGBs.append('fog')
    #in_msg.RGBs.append('green_snow')
    ##in_msg.RGBs.append('ir108')
    #in_msg.RGBs.append('natural')     # IR_016  VIS008  VIS006
    #in_msg.RGBs.append('night_fog')
    #in_msg.RGBs.append('night_microphysics')  # IR_120-IR_108  IR_108-IR_039  IR_108
    #in_msg.RGBs.append('night_overview')
    #in_msg.RGBs.append('overview')
    ##in_msg.RGBs.append('overview_sun')
    #in_msg.RGBs.append('red_snow')
    ##in_msg.RGBs.append('refl39_chan')  # requires the pyspectral module
    ##in_msg.RGBs.append('snow')         # requires the pyspectral module
    ##in_msg.RGBs.append('vis06')
    ##in_msg.RGBs.append('wv_high')
    ##in_msg.RGBs.append('wv_low')

    #-------------------
    # user defined RGBs
    #-------------------
    #in_msg.RGBs.append('HRoverview')
    ##in_msg.RGBs.append('sandwich')
    ##in_msg.RGBs.append('ndvi')
    #in_msg.RGBs.append('sza')
    #in_msg.RGBs.append('HRVFog')
    #in_msg.RGBs.append('DayNightFog')

    ##-------------------
    ## NWC SAF
    ##-------------------
    ## NWC SAF PEG 1
    #in_msg.RGBs.append('CMa')
    #in_msg.RGBs.append('CMa_DUST')
    #in_msg.RGBs.append('CMa_VOLCANIC')
    #in_msg.RGBs.append('CMa_QUALITY')
    ## NWC SAF PEG 2
    #in_msg.RGBs.append('CT')
    #in_msg.RGBs.append('CT_PHASE')
    #in_msg.RGBs.append('CT_QUALITY')
    ## NWC SAF PEG 3
    in_msg.nwcsaf_calibrate = True
    #in_msg.RGBs.append('CTT')
    #in_msg.RGBs.append('CTH')
    #in_msg.RGBs.append('CTP')
    ## NWC SAF PEG 4
    #in_msg.RGBs.append('CRR')
    ## NWC SAF PEG 5
    #in_msg.RGBs.append('PC')
    ## NWC SAF PEG 13
    #in_msg.nwcsaf_calibrate = False
    #in_msg.RGBs.append('sphr_bl')
    #in_msg.RGBs.append('sphr_cape')
    #in_msg.RGBs.append('sphr_diffbl')
    #in_msg.RGBs.append('sphr_diffhl')
    #in_msg.RGBs.append('sphr_diffki')
    #in_msg.RGBs.append('sphr_diffli')
    #in_msg.RGBs.append('sphr_diffml')
    #in_msg.RGBs.append('sphr_diffshw')
    #in_msg.RGBs.append('sphr_difftpw')
    #in_msg.RGBs.append('sphr_hl')
    #in_msg.RGBs.append('sphr_ki')
    #in_msg.RGBs.append('sphr_li')
    #in_msg.RGBs.append('sphr_ml')
    #in_msg.RGBs.append('sphr_quality')
    #in_msg.RGBs.append('sphr_sflag')
    #in_msg.RGBs.append('sphr_shw')
    #in_msg.RGBs.append('sphr_tpw')
    #in_msg.RGBs.append('SPhR_BL')       # old format
    #in_msg.RGBs.append('SPhR_CAPE')     # old format
    #in_msg.RGBs.append('SPhR_HL')       # old format
    #in_msg.RGBs.append('SPhR_KI')       # old format
    #in_msg.RGBs.append('SPhR_LI')       # old format
    #in_msg.RGBs.append('SPhR_ML')       # old format
    #in_msg.RGBs.append('SPhR_QUALITY')  # old format
    #in_msg.RGBs.append('SPhR_SHW')      # old format
    #in_msg.RGBs.append('SPhR_TPW')      # old format
    ## NWC SAF PEG 14
    #in_msg.RGBs.append('PCPh')
    #in_msg.RGBs.append('CRPh')

    #-------------------
    # H-SAF
    #-------------------
    #in_msg.sat = "HSAF"
    #in_msg.sat_nr = ""
    #in_msg.RSS = False
    #in_msg.RGBs.append('h03')

    #-------------------
    # CPP (cloud physical products from KNMI)
    #-------------------
    #in_msg.sat = "cpp"
    #in_msg.RGBs.append('azidiff')
    #in_msg.RGBs.append('cth')
    #in_msg.RGBs.append('cldmask')
    #in_msg.RGBs.append('cot')
    #in_msg.RGBs.append('cph')
    #in_msg.RGBs.append('ctt')
    #in_msg.RGBs.append('cwp')
    #in_msg.RGBs.append('dcld')
    #in_msg.RGBs.append('dcot')
    #in_msg.RGBs.append('dcwp')
    #in_msg.RGBs.append('dndv')
    #in_msg.RGBs.append('dreff')
    #in_msg.RGBs.append('precip')
    #in_msg.RGBs.append('precip_ir')
    #in_msg.RGBs.append('qa')
    #in_msg.RGBs.append('reff')
    #in_msg.RGBs.append('satz')
    #in_msg.RGBs.append('sds')
    #in_msg.RGBs.append('sds_cs')
    #in_msg.RGBs.append('sds_diff')
    #in_msg.RGBs.append('sds_diff_cs')
    #in_msg.RGBs.append('sunz')
    #in_msg.RGBs.append('lat')
    #in_msg.RGBs.append('lon')
    #in_msg.RGBs.append('time_offset')

    #-------------------
    # msg-ot (Overshooting tops from Bedka 2016)
    #-------------------
    #in_msg.RGBs.append('ir_brightness_temperature')
    #in_msg.RGBs.append('ot_rating_ir')
    #in_msg.RGBs.append('ot_id_number')
    #in_msg.RGBs.append('ot_anvilmean_brightness_temperature_difference')
    #in_msg.RGBs.append('ir_anvil_detection')
    #in_msg.RGBs.append('visible_reflectance')
    #in_msg.RGBs.append('ot_rating_visible')
    #in_msg.RGBs.append('ot_rating_shadow')
    #in_msg.RGBs.append('ot_probability')
    #in_msg.RGBs.append('surface_based_cape')
    #in_msg.RGBs.append('most_unstable_cape')
    #in_msg.RGBs.append('most_unstable_equilibrium_level_temperature')
    #in_msg.RGBs.append('tropopause_temperature')
    #in_msg.RGBs.append('surface_1km_wind_shear')
    #in_msg.RGBs.append('surface_3km_wind_shear')
    #in_msg.RGBs.append('surface_6km_wind_shear')
    #in_msg.RGBs.append('ot_potential_temperature')
    #in_msg.RGBs.append('ot_height')
    #in_msg.RGBs.append('ot_pressure')
    #in_msg.RGBs.append('parallax_correction_latitude')
    #in_msg.RGBs.append('parallax_correction_longitude')

    #-------------------
    # COSMO (cosmo1)
    #-------------------
    in_msg.sat = "cosmo"
    in_msg.instrument = "cosmo"
    in_msg.sat_nr = "1e"
    in_msg.pressure_levels = {}
    #in_msg.RGBs.append('lon_1')
    #in_msg.RGBs.append('lat_1')
    #in_msg.RGBs.append('POT_VORTIC')
    #in_msg.RGBs.append('THETAE')
    #in_msg.RGBs.append('MCONV')
    #in_msg.RGBs.append('geopotential_height')
    #in_msg.RGBs.append('TWATER')
    #in_msg.RGBs.append('tropopause_height')
    #in_msg.RGBs.append('tropopause_temperature')
    #in_msg.RGBs.append('tropopause_pressure')
    #in_msg.RGBs.append('FF_10M')
    #in_msg.RGBs.append('VMAX_10M')
    #in_msg.RGBs.append('CAPE_MU')
    #in_msg.RGBs.append('CAPE_ML')
    #in_msg.RGBs.append('CIN_MU')
    #in_msg.RGBs.append('CIN_ML')
    #in_msg.RGBs.append('SLI')
    #in_msg.RGBs.append('LCL_ML')
    #in_msg.RGBs.append('LFC_ML')
    #in_msg.RGBs.append('T_SO')
    #in_msg.RGBs.append('T_2M')
    #in_msg.RGBs.append('TD_2M')
    #in_msg.RGBs.append('GLOB')
    #in_msg.RGBs.append('PS')
    #in_msg.RGBs.append('RELHUM')
    #in_msg.RGBs.append('PMSL')
    #in_msg.RGBs.append('PMSLr')
    #in_msg.RGBs.append('HZEROCL')
    #in_msg.RGBs.append('WSHEAR_0-3km')
    #in_msg.RGBs.append('WSHEAR_0-6km')
    #in_msg.RGBs.append('SYNMSG_BT_CL_IR10.8')
    #in_msg.RGBs.append('U')
    #in_msg.RGBs.append('U-100hPa')
    #in_msg.pressure_levels["U"] = [800, 500, 300]
    #in_msg.RGBs.append('V')
    #in_msg.pressure_levels["V"] = [800, 500, 300]
    #in_msg.RGBs.append('streamplot')
    #in_msg.RGBs.append('streamplot-300hPa')
    #in_msg.RGBs.append('streamplot-500hPa')
    #in_msg.RGBs.append('streamplot-800hPa')
    #in_msg.RGBs.append('T_2M')
    in_msg.RGBs.append('LPI')
    #in_msg.RGBs.append('DHAIL_AV')
    #in_msg.RGBs.append('DHAIL_SD')
    #in_msg.RGBs.append('DHAIL_MX')

    # experimental
    #in_msg.RGBs.append('clouddepth')  # test according to Mecikalski, 2010
    ##in_msg.RGBs.append('RII')

    #----------------
    # choose area
    #----------------
    in_msg.areas.append('ccs4')  # CCS4 Swiss projection 710x640
    #in_msg.areas.append('alps95')  # area around Switzerland processed by NWCSAF software 349x151
    #in_msg.areas.append('ticino')  # stereographic proj of Ticino 342x311
    #in_msg.areas.append('germ')  # Germany 1024x1024
    #in_msg.areas.append('EuropeCanary')  # upper third of MSG disk, satellite at 0.0 deg East, full resolution
    #in_msg.areas.append('EuropeCanary95')  # upper third of MSG disk, satellite at 9.5 deg East, full resolution
    #in_msg.areas.append('EuropeCanaryS95')  # upper third of MSG disk, satellite at 9.5 deg East, reduced resolution 1000x400
    #in_msg.areas.append('euro4')  # Europe 4km, 1024x1024
    #in_msg.areas.append('nrEURO1km')  # Switzerland 1.056km for COALITION
    #in_msg.areas.append('euroHDready')  # Europe in HD resolution 1280x720
    #in_msg.areas.append('MSGHRVN')  # high resolution northern quarter 11136x2784
    #in_msg.areas.append('fullearth')  # full earth 600x300, does not yet work
    #in_msg.areas.append('met09globe')  # cropped globe MSG image 3620x3620, does not yet work
    #in_msg.areas.append('met09globeFull')  # full globe MSG image 3712x3712, does not yet work
    #in_msg.areas.append('odysseyS25')  # area of the Odyssey composite (factor 2.5 smaller)
    #in_msg.areas.append("nrEURO1km")
    #in_msg.areas.append("EuroMercator")  # same projection as blitzortung.org
    in_msg.check_RSS_coverage()

    # please download the shape file
    # in_msg.mapDir = '/data/OWARNA/hau/maps_pytroll/'
    in_msg.mapResolution = None
    ## f  full resolution: original (full) data resolution
    ## h  high resolution: about 80 % reduction in size and quality
    ## i  intermediate resolution: another ~80 % reduction
    ## l  low resolution: another ~80 % reduction
    ## c  crude resolution: another ~80 % reduction

    # switch off Rapid Scan Service if large areas are wanted
    if ('fullearth' in in_msg.areas) or ('met09globe' in in_msg.areas) or \
       ('met09globeFull' in in_msg.areas):
        in_msg.RSS = False

    in_msg.check_input = False
    #in_msg.reader_level = "seviri-level4"
    #in_msg.parallax_correction = True
    #in_msg.estimate_cth = True
    #in_msg.parallax_gapfilling = 'bilinear'  # 'False' (default), 'nearest', 'bilinear'
    #in_msg.save_reprojected_data = ['ccs4']
    in_msg.reprojected_data_filename = '%(msg)s_%(area)s_%Y%m%d%H%M_nwcsaf.nc'
    in_msg.reprojected_data_dir = '/data/COALITION2/database/meteosat/ccs4/%Y/%m/%d/'
    in_msg.save_statistics = False

    in_msg.make_plots = True
    in_msg.fill_value = None  # black (0,0,0) / white (1,1,1) / transparent None
    in_msg.add_title = True
    in_msg.title = [" %(sat)s, %Y-%m-%d %H:%MUTC, %(area)s, %(rgb)s"]
    in_msg.title_y_line_nr = 1  # (INT) at which line the title should start
    in_msg.add_borders = True
    in_msg.border_color = 'red'
    in_msg.add_rivers = False
    in_msg.river_color = 'blue'
    in_msg.add_logos = False
    in_msg.logos_dir = "/opt/users/common/logos/"
    in_msg.add_colorscale = True
    in_msg.HRV_enhancement = False

    in_msg.outputFormats = ['png']
    #in_msg.outputFormats = ['png', 'ninjotif']
    in_msg.outputFile = 'COSMO_%(rgb)s-%(area)s_%y%m%d%H%M.png'
    in_msg.outputDir = './pics/'
    #in_msg.outputDir = "./%Y-%m-%d/%Y-%m-%d_%(rgb)s-%(area)s/"
    #in_msg.outputDir = '/data/cinesat/out/'
    in_msg.outputDir = '/data/COALITION2/PicturesSatellite/%Y-%m-%d/%Y-%m-%d_%(rgb)s_%(area)s/'
    in_msg.compress_to_8bit = False

    in_msg.ninjotifFilename = 'MET%(sat_nr)s_%(RSS)s_%(rgb)s_%(area)s_%Y%m%d%H%M.tif'
    in_msg.upload_ninjotif = False

    #in_msg.postprocessing_areas = ['ccs4']
    #in_msg.postprocessing_areas = ['EuropeCanaryS95']
    #in_msg.postprocessing_areas = ["EuroMercator"]
    #in_msg.postprocessing_montage = [["MSG_IR-108c", "COSMO_SYNMSG-BT-CL-IR10.8"]]
    in_msg.postprocessing_montage = [["MSG_IR-108cpc", "COSMO_SYNMSG-BT-CL-IR10.8"]]
    #in_msg.resize_montage = 70
    #in_msg.resize_composite = 100
    in_msg.scpOutput = False
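# Hedged usage sketch (not from the original scripts): how a driver might call
# the input() config function above. The InputMsg container class below is an
# illustrative stand-in; the real framework supplies its own in_msg object
# with RGBs/areas attributes and helper methods. Note that input() here
# shadows the builtin of the same name.
class InputMsg(object):
    def __init__(self):
        self.RGBs = []
        self.areas = []
        self.pressure_levels = {}

    def check_RSS_coverage(self):
        pass  # placeholder for the real coverage check


in_msg = InputMsg()
input(in_msg)  # fills in_msg and records the config file's own path
print(in_msg.input_file, in_msg.RGBs, in_msg.areas)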
def AddFeaturesToFeatureLayer(self, url, pathToFeatureClass, chunksize=0,
                              lowerCaseFieldNames=False):
    if not arcpyFound:
        raise common.ArcRestHelperError({
            "function": "AddFeaturesToFeatureLayer",
            "line": inspect.currentframe().f_back.f_lineno,
            "filename": 'featureservicetools',
            "synerror": "ArcPy required for this function"
        })
    fl = None
    try:
        fl = FeatureLayer(url=url, securityHandler=self._securityHandler)
        if chunksize > 0:
            fc = os.path.basename(pathToFeatureClass)
            inDesc = arcpy.Describe(pathToFeatureClass)
            oidName = arcpy.AddFieldDelimiters(pathToFeatureClass,
                                               inDesc.oidFieldName)
            arr = arcpy.da.FeatureClassToNumPyArray(pathToFeatureClass,
                                                    [oidName])
            syncSoFar = 0
            messages = {'addResults': [], 'errors': []}
            total = len(arr)
            errorCount = 0
            if total == 0:
                print("0 features in %s" % pathToFeatureClass)
                return "0 features in %s" % pathToFeatureClass
            print("%s features in layer" % (total))
            arcpy.env.overwriteOutput = True
            if int(total) < int(chunksize):
                return fl.addFeatures(fc=pathToFeatureClass,
                                      lowerCaseFieldNames=lowerCaseFieldNames)
            else:
                newArr = chunklist(arr, chunksize)
                exprList = ["{0} >= {1} AND {0} <= {2}".format(
                    oidName, nArr[0][0], nArr[len(nArr) - 1][0])
                    for nArr in newArr]
                for expr in exprList:
                    UploadLayer = arcpy.MakeFeatureLayer_management(
                        pathToFeatureClass, 'TEMPCOPY', expr).getOutput(0)
                    results = fl.addFeatures(fc=UploadLayer,
                                             lowerCaseFieldNames=lowerCaseFieldNames)
                    # GetCount_management returns its result as a string.
                    chunkCount = arcpy.GetCount_management(
                        in_rows=UploadLayer).getOutput(0)
                    print(chunkCount + " features in the chunk")
                    if int(chunkCount) > 0:
                        if results is not None and 'addResults' in results \
                                and results['addResults'] is not None:
                            featSucces = 0
                            for result in results['addResults']:
                                if 'success' in result:
                                    if not result['success']:
                                        if 'error' in result:
                                            errorCount = errorCount + 1
                                            print("\tError info: %s" % (result))
                                    else:
                                        featSucces = featSucces + 1
                            syncSoFar = syncSoFar + featSucces
                            print("%s features added in this chunk" % (featSucces))
                            print("%s/%s features added, %s errors" %
                                  (syncSoFar, total, errorCount))
                            if 'addResults' in messages:
                                messages['addResults'] = messages['addResults'] + \
                                    results['addResults']
                            else:
                                messages['addResults'] = results['addResults']
                        else:
                            messages['errors'] = result
                return messages
        else:
            return fl.addFeatures(fc=pathToFeatureClass,
                                  lowerCaseFieldNames=lowerCaseFieldNames)
    except arcpy.ExecuteError:
        line, filename, synerror = trace()
        raise common.ArcRestHelperError({
            "function": "AddFeaturesToFeatureLayer",
            "line": line,
            "filename": filename,
            "synerror": synerror,
            "arcpyError": arcpy.GetMessages(2),
        })
    except:
        line, filename, synerror = trace()
        raise common.ArcRestHelperError({
            "function": "AddFeaturesToFeatureLayer",
            "line": line,
            "filename": filename,
            "synerror": synerror,
        })
    finally:
        fl = None
        del fl
        gc.collect()
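# Hedged sketch: chunklist is used above but not defined in this excerpt; one
# plausible implementation is below, together with the OID-range expression
# trick - assuming the OID array is in ascending order (as
# FeatureClassToNumPyArray typically returns it), "first <= OID <= last"
# selects exactly that chunk's rows. The tuples stand in for NumPy records.
def chunklist(seq, chunksize):
    """Split seq into consecutive pieces of at most chunksize items."""
    return [seq[i:i + chunksize] for i in range(0, len(seq), chunksize)]


oids = [(1,), (2,), (3,), (7,), (9,)]  # stand-in for the structured OID array
for chunk in chunklist(oids, 2):
    expr = "{0} >= {1} AND {0} <= {2}".format('OBJECTID', chunk[0][0],
                                              chunk[-1][0])
    print(expr)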