def plot_fmt(*data, style='unknown'):
    """
    Draw DUT data with its style.

    Arguments:
        data  - Datasets to be plotted. Accepts 1 or 2 arrays as y or x, y.
        style - Sets the marker and line style.
    """
    if style == 'p':
        m_marker = '.'
        m_style = ''
    elif style == 'l':
        m_marker = ''
        m_style = '-'
    else:
        m_marker = '.'
        m_style = '-'
    try:
        plt.plot(*data, color='#0652FF', marker=m_marker, markersize=4,
                 linestyle=m_style, linewidth=1)
        return True
    except ValueError as e:
        if uti.is_substring("x and y must have same first dimension", e.args[0]):
            logger().exception("The numbers of x and y values do not match!")
        else:
            logger().exception(
                "Failed to plot DUT. There might be a format error in the dut db file."
            )
        return False
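# Hedged usage sketch (added for illustration, not part of the original source):
# plot_fmt() accepts one array (y only) or two arrays (x, y). This assumes
# matplotlib.pyplot is imported as plt and that uti/logger are available, as the
# function above already requires.
def example_plot_fmt_usage():
    drew_points = plot_fmt([1, 2, 4, 8], style='p')              # y only, markers
    drew_line = plot_fmt([0, 1, 2, 3], [1, 2, 4, 8], style='l')  # x, y pair, line
    if drew_points and drew_line:
        plt.show()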
def insert_data(table_name, csv_str, log_file):
    time_now = datetime.now()
    date_str = time_now.strftime("%Y-%m-%d")
    # read csv and transfer to database
    csv_str = csv_str.replace('\\', '/')
    df = pd.read_csv(csv_str,
                     names=['ori_id', 'ori_name', 'ori_address', 'ori_vi',
                            'ori_su', 'ori_di', 'ori_pr'],
                     skiprows=1, sep=',', index_col=False)
    df["flag_geocode"] = 'Q'
    df["ori_file"] = csv_str
    df["modified"] = date_str
    engine = engine_sqlalchemy()
    try:
        df.to_sql(name=table_name, con=engine, if_exists='append', index=False)
    except sqlalchemy.exc.DataError:
        logger(log_file, 'debug', "sqlalchemy - DataError")
        print("sqlalchemy - DataError")
    except sqlalchemy.exc.IntegrityError:
        logger(log_file, 'warning', "sqlalchemy - data already exist, will be skipped")
        print("sqlalchemy - data already exist, will be skipped")
    # capitalize the data
    sql = upperData(table_name)
    execScalar(sql)
def read_last_parameters(filename):
    """
    Read parameters from a file.

    Arguments:
        filename - Filename to read from.
    Return Values:
        last_product_number, last_tester, req_filename,
        path_Golden, path_DUT, output_dir
    """
    parameters = []
    try:
        with open(filename, 'r', encoding="utf-8") as para_file:
            for m_line in para_file:
                line = m_line.strip()
                if not line:
                    continue
                para = line.split(': ')
                if len(para) != 2:
                    continue
                parameters.append(para[1])
    except IOError:
        logger().warning("Open %s failed - File does not exist." % filename)
    return parameters
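# Hedged usage sketch (illustration only): read_last_parameters() keeps the
# value of every "name: value" line, in file order. The file name and contents
# below are hypothetical, e.g. a last_run.txt containing
#   product: P1234
#   tester: station_07
def example_read_last_parameters():
    values = read_last_parameters("last_run.txt")
    if len(values) == 6:
        product_number, tester, req_file, golden_file, dut_file, output_dir = values
        return product_number, tester, req_file, golden_file, dut_file, output_dir
    return None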
def download_pdf(arxiv_id):
    """
    Download the PDF file from the mirror site.
    Note that the download may fail.
    :param arxiv_id:
    :return: 0: download failed
             1: download succeeded
             2: file already exists
    """
    basename = arxiv_id[6:] + '.pdf'
    fname = os.path.join(file_path, basename)
    if os.path.isfile(fname):
        # skip the file if it already exists
        logger.info(fname + " already exists, skipping this file")
        return 2
    pdf_url = 'http://arxiv.org/pdf/' + basename
    logger.info("using mirror 1: " + pdf_url)
    try:
        res = r.get(pdf_url, headers=get_header())
        with open(fname, 'wb+') as f:
            f.write(res.content)
        fsize = os.path.getsize(fname)
        f_kb = fsize / float(1024)
        if f_kb <= 10:
            logger.error("downloaded file is too small")
            pdf_url = 'http://arxiv.org/ftp/arxiv/papers/' + basename[:4] + '/' + basename
            logger.error("using mirror 2: " + pdf_url)
            res = r.get(pdf_url, headers=get_header())
            with open(fname, 'wb+') as f:
                f.write(res.content)
            return 0
        return 1
    except Exception as e:
        logger.error('error downloading: ' + pdf_url)
        return 0
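# Hedged usage sketch (illustration only): the slice arxiv_id[6:] suggests the
# id carries a 6-character prefix such as "arXiv:"; the id below is hypothetical.
def example_download_pdf():
    status = download_pdf("arXiv:1706.03762")
    # 0: download failed, 1: downloaded, 2: file already existed
    return status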
def cleansing(table_name, log_file):
    query = fetchCleanseAddress(table_name)
    data = fetch(query)
    for row in data:
        idx = row[0]
        ori_address = row[1] or ''
        logger(log_file, 'info', "Cleansing %i: %s" % (idx, ori_address))
        print("Cleansing %i: %s" % (idx, ori_address))
        fixed_address_1 = cleansingGeocode(ori_address)
        query = updateCleanseAddress(table_name, 'cleanse_address_1', fixed_address_1, idx)
        execScalar(query)
        fixed_address_2 = cleansingGeocodeRetry(ori_address)
        if fixed_address_2 != fixed_address_1:
            query = updateCleanseAddress(table_name, 'cleanse_address_2', fixed_address_2, idx)
            execScalar(query)
        fixed_address_3 = cleansingGPlaces(ori_address)
        query = updateCleanseAddress(table_name, 'cleanse_address_3', fixed_address_3, idx)
        execScalar(query)
    logger(log_file, 'info', "All data has been cleansed")
    print("\tAll data has been cleansed")
def wrong(self, event=None):
    cur_item = self.tree.get_cur_item()
    cur_entry = self.get_entry(cur_item)
    cur_entry.set_wrong()
    m_comment = AskString(parent=self.master,
                          title="Comment Required",
                          message="Please input your comment:").go()
    if m_comment and m_comment.strip():
        """
        if cur_entry.etype == CheckEntry.TABLE:
            cur_entry.set_ref(cur_entry.get_t_data())
        else:
            path = report.save_figure(cur_entry.title)
            cur_entry.set_ref(path)
        """
        if cur_entry.ref is None:
            cur_entry.save()
        logger().debug("NOK graph comment: %s", m_comment)
        cur_entry.set_comment(m_comment)
        self.set_conclusion(self.tree.get_cur_iid(), "NG")
        self.tree.set_tag(self.tree.get_cur_iid(), status='wrong')
        self.update_color()
        # Move on to the next item
        next_item = self.tree.get_next_item()
        if next_item:
            self.tree.go_next()
            # next_entry = self.get_entry(next_item)
            # self.show_plot(next_entry)
    else:
        Alert(parent=self.master, title='Warning',
              message='Please input comments first!').go()
def store_parameter(product_number, tester, req_file, golden_file, dut_file, output_dir):
    """
    Store the parameters for the report module.

    Arguments:
        product_number - product number of the DUT
        tester         - tester of the DB check
        req_file       - requirement file
        golden_file    - golden file
        dut_file       - DUT file
        output_dir     - output directory
    """
    global g_product_number
    global g_tester
    global g_req_file
    global g_golden_file
    global g_dut_file
    global g_output_dir
    global result_path
    g_product_number = product_number
    g_tester = tester
    g_req_file = req_file
    g_golden_file = golden_file
    g_dut_file = dut_file
    g_output_dir = output_dir
    if not g_output_dir:
        g_output_dir = os.getcwd()
    result_path_suffix = time.strftime("_%Y%m%d_%H%M%S")
    result_path = os.path.join(g_output_dir, "result" + result_path_suffix)
    logger().info("result_path= %s" % result_path)
    uti.create_dir(result_path)
def serialization(self):
    """ Serialize the model to disk. """
    logger(verbose.WARNING).feed(" The model will be saved to path: ",
                                 self.save_file_path)
    if not os.path.exists(os.path.dirname(self.save_file_path)):
        os.makedirs(os.path.dirname(self.save_file_path))
    self.graph_io.serialization(self.save_file_path)
def _on_closed(self):
    """ Called when the quit button is clicked. """
    var_box = Confirm(parent=self.master, title='Info',
                      message='Are you sure you want to quit?').go()
    if var_box:
        logger().warning("Execution aborted due to user operation!")
        exit(0)
def create_db_connection(logger, hostname, database, username, password, port):
    try:
        connection = psycopg2.connect(database=database,
                                      user=username,
                                      password=password,
                                      host=hostname,
                                      port=port)
        return connection
    except:
        logger("Can't connect to redshift database.", 'exception', 'root')
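# Hedged usage sketch (illustration only): the first argument is the logging
# callable with the (message, level, name) signature used elsewhere in this
# collection; the host/database names below are hypothetical.
def example_create_db_connection(logger):
    conn = create_db_connection(logger,
                                hostname="redshift.example.internal",
                                database="analytics",
                                username="etl_user",
                                password="s3cret",
                                port=5439)
    return conn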
def read_data(dictionary, cmd):
    """
    Read the data of the command in the dictionary.

    Arguments:
        dictionary - The dictionary that contains commands and data.
        cmd        - The command to search for.
    return - The data of the command.
    """
    if cmd in dictionary:
        return dictionary[cmd].data
    logger().error("Failed to find this command: %s" % cmd)
    return None
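# Hedged usage sketch (illustration only): read_data() expects dictionary values
# with a .data attribute; the Entry namedtuple below is a stand-in for whatever
# record type the original code uses, and the commands are hypothetical.
from collections import namedtuple

Entry = namedtuple("Entry", "data")

def example_read_data():
    commands = {"VOLT?": Entry(data=[3.3, 3.31]), "CURR?": Entry(data=[0.5])}
    volts = read_data(commands, "VOLT?")    # -> [3.3, 3.31]
    missing = read_data(commands, "TEMP?")  # logs an error, returns None
    return volts, missing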
def output_callback():
    """ Open the output path selection window. """
    global output_entry
    global output_dir
    output_dir = filedialog.askdirectory(title='select the output directory',
                                         initialdir=output_entry.get())
    logger().debug("Output path selected: %s", output_dir)
    if output_dir != "":
        output_entry.delete(0, tk.END)
        output_entry.insert(0, output_dir)
def golden_callback():
    """ Open the golden file selection window. """
    global golden_entry
    global path_Golden
    path_Golden = filedialog.askopenfilename(title='select the old golden DB file',
                                             filetypes=[('text file', '*.txt'),
                                                        ('All Files', '*')],
                                             initialdir=os.path.dirname(golden_entry.get()))
    logger().debug("Golden file selected: %s", path_Golden)
    if path_Golden != "":
        golden_entry.delete(0, tk.END)
        golden_entry.insert(0, path_Golden)
def parse_interactive(line):
    global service
    parser = create_parser()
    logger().debug("COMMAND ENTERED: " + line)
    args = shlex.split(line)
    for i in range(len(args)):
        if args[i] in [
                "listen", "list_victims", "list", "rename_victim", "rename",
                "get_machine_info", "minfo", "get_file", "send_file",
                "execute_command", "exec", "execute_file", "execf", "help",
                "exit", "delete", "download_files", "download"
        ]:
            args[i] = "--" + args[i]
    parsed = vars(parser.parse_args(args))
    if parsed["help"]:
        logger().info(parser.format_help())
    elif parsed["exit"]:
        do_exit(0)
    elif parsed["listen"]:
        server_proc = send_kill()
        if server_proc is not None:
            stop_notif_server()
            server_proc.join()
            logger().info("Notification server stopped.")
        else:
            start_listen(NOTIF_SERVER_URL)
    elif parsed["list"]:
        logger().info("Victims:\n{}".format("\n".join(get_victim_list(service))))
    else:
        commands = parse_arguments(service, parsed, interactive=True)
        send_commands(service, commands)
def dut_callback():
    """ Open the DUT data file selection window. """
    global dut_entry
    global path_DUT
    path_DUT = filedialog.askopenfilename(title='select the new DUT DB file',
                                          filetypes=[('text file', '*.txt'),
                                                     ('All Files', '*')],
                                          initialdir=os.path.dirname(dut_entry.get()))
    logger().debug("DUT data file selected: %s", path_DUT)
    if path_DUT != "":
        dut_entry.delete(0, tk.END)
        dut_entry.insert(0, path_DUT)
def req_callback():
    """ Open the requirements file selection window. """
    global req_entry
    global req_filename
    req_filename = filedialog.askopenfilename(title='select the DB requirement file',
                                              filetypes=[('text file', '*.txt'),
                                                         ('All Files', '*')],
                                              initialdir=os.path.dirname(req_entry.get()))
    logger().debug("Requirement file selected: %s", req_filename)
    if req_filename != "":
        req_entry.delete(0, tk.END)
        req_entry.insert(0, req_filename)
def _mllog_print(logger, *args, **kwargs):
    """
    Wrapper for MLPerf compliance logging calls.
    All arguments but 'sync' are passed to the mlperf_log.mllog_print function.
    If 'sync' is set to True then the wrapper will synchronize all distributed
    workers. 'sync' should be set to True for all compliance tags that require
    accurate timing (RUN_START, RUN_STOP, etc.).
    """
    if kwargs.pop('sync', False):
        barrier()
    if 'value' not in kwargs:
        kwargs['value'] = None
    if get_rank() == 0:
        logger(*args, **kwargs, stack_offset=3)
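# Hedged usage sketch (illustration only): how _mllog_print() might be called
# for a timing-sensitive compliance tag versus an ordinary event. The mllogger
# and constants objects are assumptions standing in for whatever mlperf logging
# objects the surrounding code defines.
def example_mllog_calls(mllogger, constants):
    _mllog_print(mllogger.start, key=constants.RUN_START, sync=True)  # barrier first
    _mllog_print(mllogger.event, key="seed", value=1234)              # rank 0 only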
def find_csv(working_folder, log_file):
    check_dir = os.path.isdir(working_folder)
    if not check_dir:
        logger(log_file, 'warning', "Directory does not exist, please check it again!")
        print("Directory does not exist, please check it again!")
    else:
        list_csv = []
        for root, dirs, files in os.walk(working_folder):
            for file in fnmatch.filter(files, "*.csv"):
                data = os.path.join(root, file)
                list_csv.append(data)
        return list_csv
def read_params(logger):
    try:
        logger('Starting reading configuration....', 'info', 'root')
        profile = 'DEV'
        if 'MACHINE_ENV' in os.environ:
            profile = os.environ['MACHINE_ENV']
        config = read_config(profile=profile)
        params = {}
        params['target_s3_path'] = config['aws']['s3_path']
        table_config = json.load(open("tables.json"))
        params['tables'] = table_config
        logger("s3 bucket path - " + params['target_s3_path'], 'info', 'root')
        print(config)
        max_process = multiprocessing.cpu_count()
        if max_process <= config['aws']['dop'] or config['aws']['dop'] < 0:
            logger("Setting degree of parallelism to cpu count " + str(max_process),
                   'info', 'root')
            params['degree_of_parallelism'] = max_process
        else:
            params['degree_of_parallelism'] = config['aws']['dop']
        return params
    except Exception as e:
        logger("Config file couldn't load", 'exception', 'root')
def Insert_Data_Face(fname, lname, encoding, isfriend):
    # SQL insert of the name and facial landmarks
    conn, cursor = Mysql_Init()
    #print(fname + '\t' + lname + '\t' + str(isfriend) + '\n' + str(encoding))
    insert_sql = "insert into face(fname,lname,encoding,isfriend) values(%s,%s,%s,%s)"
    #print(insert_sql)
    try:
        cursor.execute(insert_sql, (fname, lname, encoding, isfriend))
        conn.commit()
        logger(" A new face added to table face " + fname + " " + lname,
               'info', 'database')
    except Exception as e:
        conn.rollback()
    DataBase_Close(cursor, conn)
def start_listen_thread(ServerHandler, host, port):
    '''Start the server listener thread.'''
    global server
    logger(__file__, 'Server Work ' + host + ':' + str(port))
    # start the listening socket
    try:
        server = ThreadedTCPServer((host, port), ServerHandler)
        server_thread = threading.Thread(target=server.serve_forever)
        server_thread.setDaemon(True)
        server_thread.start()
    except Exception as data:
        error = str(data)
        logger(__file__, error.decode('gbk'))
def Create_Table_Check():
    conn, cursor = Mysql_Init()
    sql = """create table check_table(
                id int primary key auto_increment,
                fname varchar(45) not null,
                lname varchar(45) not null,
                date date not null,
                time time not null)charset utf8;
          """
    try:
        cursor.execute(sql)
        conn.commit()
        logger(" table check_table created", 'info', 'database')
    except:
        conn.rollback()
    DataBase_Close(cursor, conn)
def __init__(self):
    # Load the database settings
    self.log = logger(logger)
    try:
        database_settings = json.load(
            open(".\\etc\\database_settings.json", 'r'))
        lis = database_settings["oracle"]["lis"]
    except:
        self.log.error("An exception occurred while reading the database settings.")
        self.log.error(sys.exc_info())
        traceback.print_exc()
        sys.exit(False)
    os.environ["NLS_LANG"] = lis["lang"]
    tns = cx_Oracle.makedsn(lis["server_address"], lis["port"],
                            lis["instance_name"])
    try:
        self.conn = cx_Oracle.connect(lis["username"], lis["password"], tns)
        self.cur = self.conn.cursor()
    except:
        self.log.error("An exception occurred while connecting to Oracle.")
        self.log.error(sys.exc_info())
        traceback.print_exc()
        sys.exit(False)
    else:
        self.log.info("Connected to Oracle successfully.")
def connect_vertica_db(logger, params):
    try:
        host = params['host']
        port = params['port']
        username = params['username']
        password = params['password']
        if not password:
            password = ssm_pass(params['ssm_name'])
        db = params['db_name']
        conn_info = {'host': host,
                     'port': port,
                     'user': username,
                     'password': password,
                     'database': db,
                     'read_timeout': 600,
                     'unicode_error': 'strict',
                     'ssl': False,
                     'connection_timeout': 5}
        connection = vertica_python.connect(**conn_info)
        return connection
    except Exception as e:
        logger("Can't connect to vertica database.", 'exception', 'root')
def handle_bad_folders(folders, force=False):
    """
    Checks that every folder in the given list exists.
    If not: exits if force is False, otherwise the folder is removed from the list.

    :param folders: list of folders to check
    :param force: boolean (default: False)
    :return: list of correct folders
    """
    bad_folders = [str(x) for x in folders if not isdir(x)]
    if bad_folders and not force:
        logger().get_logger().error(
            "Some of the directories you gave are wrong, please check:\n {0}".format(
                '\n '.join(bad_folders)))
        exit(1)
    elif bad_folders and force:
        folders = [x for x in folders if x not in bad_folders]
    return folders
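# Hedged usage sketch (illustration only): with force=True missing folders are
# silently dropped from the result; with the default force=False the process
# exits when any folder is missing. The paths below are hypothetical.
def example_handle_bad_folders():
    requested = ["/data/run1", "/data/run2", "/tmp"]
    usable = handle_bad_folders(requested, force=True)
    return usable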
def assign_sonar_issue(issue):
    try:
        credentials = str(
            base64.b64encode(bytes(sonar_user + ":" + sonar_pass, 'utf-8'))
        ).replace('b\'', '').replace('\'', '')
        headers = {"Authorization": ('Basic ' + credentials)}
        response = requests.post(sonar_base_url + '/api/issues/assign?issue=' + issue
                                 + '&assignee=' + sonar_user,
                                 headers=headers)
        print('Sonar issue assign status code: ' + str(response.status_code))
    except Exception as e:
        logger(e)
def check_version():
    """ Get the newest version on the server. """
    try:
        a = os.listdir(path)
        version = ''
        for name in a:
            logger().debug(name)
            if name.startswith('version'):
                version = name.split('_')[1]
                break
        return version
    except FileNotFoundError:
        logger().warning("Update path does not exist: %s" % path)
        exit(0)
def Create_Table_Unknown():
    conn, cursor = Mysql_Init()
    sql = """create table unknown (
                id int auto_increment primary key,
                name varchar(40) not null,
                encoding blob not null,
                date date not null,
                time time not null,
                filename varchar(80) not null
             )charset utf8;"""
    try:
        cursor.execute(sql)
        conn.commit()
        logger(" table unknown created", 'info', 'database')
    except:
        conn.rollback()
    DataBase_Close(cursor, conn)
def Create_Table_Users():
    conn, cursor = Mysql_Init()
    sql = """create table users(
                id int primary key auto_increment,
                username varchar(45) not null,
                password varchar(192) not null,
                encoding blob not null,
                gid int default 1
             )charset utf8;
          """
    try:
        cursor.execute(sql)
        conn.commit()
        logger(" table users created", 'info', 'database')
    except:
        conn.rollback()
    DataBase_Close(cursor, conn)
def __init__(self, url):
    self.url = url
    self.domain = ''
    self.html_code = ''
    self.links_list = []
    self.title = ''
    self.log = logger()
    self.page_text = ''
def save_report():
    """ Save the report document. """
    global result_path
    global report_file
    dir_existed = os.path.isdir(result_path)
    if not dir_existed:
        uti.create_dir(result_path)
    report_file = os.path.join(result_path, "DbCheckReport.docx")
    try:
        document.save(report_file)
    except PermissionError:
        logger().exception('Report file has been opened by another program.')
        os.sys.exit(1)
    else:
        logger().info("%s is saved successfully." % os.path.abspath(report_file))
def get_remotefile(filename=None):
    # scp the science image
    # build the scp command; note the spaces between each parameter
    # use -v to debug the scp/ssh
    #cmd = 'scp -v -i ~/.ssh/id_dsa_nk '
    command = 'time scp -i ~/.ssh/id_dsa_nk ' \
        + ' ' + host + filename \
        + ' ' + stagepath + '/. '
    logdata = command
    logger(flog, logdata)

    itry = 0
    Transfered = False
    trace = traceback.extract_stack()[-1]
    print(os.path.basename(trace[0]), ' line :', str(trace[1]))
    while (itry < iretry_max) and not Transfered:
        itry = itry + 1
        if debug or verbose:
            print(command)
        #result = os.popen(command)
        #help(result)
        result = subprocess.Popen(command, stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE, shell=True)
        print('Using subprocess.Popen and communicate')
        output, errors = result.communicate()
        #result = subprocess.check_output(command)
        #help(result)
        print('subprocess output: ', output)
        print('subprocess errors: ', errors)
        #while True:
        #    line = errors.readline()
        #    if line == "":
        #        break
        #    logdata = line
        #    logger(flog, logdata)
        #    print(line,)
        #    if debug: key = raw_input("Enter any key to continue: ")

        # check that the file was transferred
        ipos = filename.rfind('/')
        imagefile = stagepath + filename[ipos + 1:]
        if os.access(imagefile, os.F_OK):
            Transfered = True
        if not os.access(imagefile, os.F_OK):
            scpfailurefile = outfile + '.scpfailure'
            scpfailurefileh = open(scpfailurefile, 'wt')
            logdata = 'WARNING: image file NOT transferred: %s' % imagefile
            logger(flogerr, logdata)
            Transfered = False
            delay = delaytime * (itry * itry)
            logdata = 'WAITING: %s seconds' % delay
            logger(flog, logdata)
            time.sleep(delay)
            continue
def main():
    ''' main procedure to extract features for all states '''
    log, s3clnt, s3folder = logger('feature'), s3_helper(), 'feature_u'
    for f in ['log', s3folder]:
        if not os.path.exists(f):
            os.makedirs(f)
    log.start()

    # connect to MongoDB and get collections
    m_url = 'ec2-52-53-173-200.us-west-1.compute.amazonaws.com'
    client = MongoClient(m_url, 27017)
    plan_col = client.plans.plans
    drug_col = client.formularies.drugs
    prov_col = client.providers.providers
    faci_col = client.providers.facilities
    log.trace('connected to MongoDB at %s' % m_url)

    # parse out plan IDs for states
    all_plan = plan_col.distinct('plan_id')  # drug_col.distinct('plans.plan_id')
    state_ids = np.unique([i[5:7] for i in all_plan])
    log.trace('find plan from %d states: %s' % (len(state_ids), ', '.join(state_ids)))

    # run the procedure for each state
    failure = []
    for state in state_ids:
        try:
            if state == '':
                continue
            state_plan = [i for i in all_plan if state in i]
            log.trace('processing %d plans for %s' % (len(state_plan), state))
            plan, feature = get_state_feature(state_plan, plan_col, drug_col, prov_col, log)
            log.trace('completed feature extraction for %d plans, with dimension %s'
                      % (len(plan), str(feature.shape)))
            # save pickle to s3
            save_name = '%s/%s_%d_%d.pickle' % (s3folder, state,
                                                feature.shape[0], feature.shape[1])
            with open(save_name, 'w') as f:
                pickle.dump([feature, plan], f)
            s3clnt.delete_by_state('%s/%s' % (s3folder, state))
            s3clnt.upload(save_name)
            log.trace('feature pickle saved to s3, complete for %s' % state)
        except Exception as ex:
            traceback.print_exc(file=log.log_handler())
            failure.append(state)
            log.error('feature extraction has encountered an error for state %s' % state)

    log.trace('feature extraction completed, failed for %d states: %s'
              % (len(failure), ', '.join(failure)))
    log.stop()
    client.close()
def main():
    ''' '''
    next_run, hour, minute = datetime.now(), 1, 18
    s3_fea, test = 'feature_d', False
    log, ready = logger('training'), do_setup(s3_fea)
    while ready:
        # cyclic execution
        if datetime.now() < next_run:
            time.sleep((next_run - datetime.now()).total_seconds())
            continue

        # get click-through data
        try:
            log.start()
            click_data = get_click_data(log) if not test else simulate_clicks()
        except Exception as ex:
            traceback.print_exc(file=log.log_handler())
            log.error('error in getting click data, retry in %d minutes.' % minute)
            next_run = datetime.now() + timedelta(minutes=minute)
            log.stop()
            continue

        # train for each state
        failure, all_states = [], np.unique(click_data['state'])
        for state in all_states:
            try:
                train_one_state(click_data, state, log, s3_fea)
            except KeyboardInterrupt:
                log.stop()
                sys.exit('User termination')
            except Exception as ex:
                failure.append(state)
                traceback.print_exc(file=log.log_handler())
                log.trace('training has encountered an error for state %s' % state)

        # training completed, get the next run time
        next_run = datetime.now() + timedelta(hours=hour)
        done_msg = ('training has completed for %d states, failed for %d states: %s'
                    % (len(all_states) - len(failure), len(failure), str(failure)))
        log.trace(done_msg)
        log.trace('next run time is %s, so long!' % str(next_run))
        log.stop()
        print('%s: %s' % (str(datetime.now()), done_msg))
nfilesmax = 1000
nearest = 1

parser = OptionParser()

from time import strftime, gmtime, sleep

pid = os.getpid()
hostname = socket.gethostname()

time_str = strftime("%Y-%m-%dT%H-%M-%S", gmtime())
flog = open("Log_wsa_crossid_bulk_%s.txt" % time_str, "wt")
flogerr = open("Logerr_wsa_crossid_bulk_%s.txt" % time_str, "wt")

logdata = "Start logfile"
logger(flog, logdata)
logdata = "Start error logfile"
logger(flogerr, logdata)
logdata = "hostname: " + hostname
logger(flog, logdata)
logdata = "pid: " + str(pid)
logger(flog, logdata)
logdata = "__name__ = " + __name__
logger(flog, logdata)
logdata = "Current working directory: %s" % (os.getcwd())
logger(flog, logdata)
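# Hedged sketch (an assumption, not the original implementation): the WSA
# scripts in this collection call logger(file_handle, message) with an
# already-open log file, so the helper is presumably a timestamped write
# along these lines.
def logger_sketch(logfile, logdata):
    timestamp = strftime("%Y-%m-%dT%H-%M-%S", gmtime())
    logfile.write("%s: %s\n" % (timestamp, str(logdata)))
    logfile.flush()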
def imcore_list_run(args=None, imagefile=None, confmapfile=None,
                    listfile=None, outfile=None):
    """
    Runs imcore_list using subprocess.
    """
    # set the imcore_list parameters; these may need to be checked against
    # values in the header of the catalogue file
    nustep = -1
    print('args.vhs: ', args.vhs)
    print('args.cache: ', args.cache)
    if not args.vhs:
        nustep = pyfits.getval(imagefile, 'NUSTEP', 0)
        print('nustep = ', nustep)

    if args.vhs:
        rcore = "3.0"
        nbsize = "64"
        threshold = "1.5"

    # UKIDSS values
    if nustep == 1:
        rcore = "2.5"
        nbsize = "64"
        threshold = "1.5"
    elif nustep == 4:
        # i.e., 2x2
        rcore = "5.0"
        nbsize = "128"
        threshold = "1.25"
    elif nustep == 9:
        # i.e., 3x3
        rcore = "7.5"
        nbsize = "192"
        threshold = "1.25"

    print('rcore, nbsize, threshold: ', rcore, nbsize, threshold)

    # the binary location needs to be a parameter for portability
    #command = 'time nice -n19 ' + IMCORE_LIST \
    command = 'time nice -n19 /home/rgm/bin/imcore_list ' \
        + ' ' + imagefile \
        + ' ' + confmapfile \
        + ' ' + listfile \
        + ' ' + outfile \
        + ' ' + threshold \
        + ' --nbsize=' + nbsize \
        + ' --rcore=' + rcore \
        + ' --cattype=6 '
        # + ' --verbose '

    # save stdout and stderr to a logfile
    stdoutlog = open(logpath + 'Logfile_stdout', 'w+')
    stderrlog = open(logpath + 'Logfile_stderr', 'w+')

    logdata = command
    logger(flog, logdata)

    # convert the command to a string list for subprocess
    command = shlex.split(command)
    print('Print command as args: ')
    print(command)

    result = subprocess.call(command, stderr=stderrlog, stdout=stdoutlog)

    Popen = False
    if Popen:
        result = subprocess.Popen(command, shell=True,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)
        print('Using subprocess.Popen and communicate')
        output, errors = result.communicate()
        result = subprocess.check_output(command)
        #help(result)
        print('subprocess output: ', output)
        print('subprocess errors: ', errors)
        #logdata = 'subprocess error status: ' + str(result)
        #logger(flog, logdata)

    trace = traceback.extract_stack()[-1]
    print(os.path.basename(trace[0]), ' line :', str(trace[1]))

    #if result is not 0:
    #    print(len(result))
    #print(result)
    result = 0
    if result != 0:
        print(result)
        trace = traceback.extract_stack()[-1]
        print(os.path.basename(trace[0]), ' line :', str(trace[1]))
        print('Something went wrong: ', args)
        logger(flogerr, logdata)
        logdata = 'Something went wrong: ' + str(command)
        logger(flogerr, logdata)
        logger(flogerr, line_save)
        logger(flogerr, command)
        while True:
            line = result.readline()
            if line == "":
                break
            logdata = line
            logger(flog, logdata)
            print(line,)
            if debug:
                key = raw_input("Enter any key to continue: ")
        # delete the outfile if created
        if os.path.exists(outfile):
            logdata = "Delete %s " % outfile
            logger(flog, logdata)
            logger(flogerr, logdata)
            os.remove(outfile)
        if os.path.exists(lockfile):
            logdata = "Delete lockfile %s " % lockfile
            logger(flog, logdata)
            logger(flogerr, logdata)
            os.remove(lockfile)
        #continue
        usage = resource.getrusage(resource.RUSAGE_CHILDREN)
        for name, desc in [
                ('ru_utime', 'User time'),
                ('ru_stime', 'System time'),
        ]:
            print('%-25s (%-10s) = %s' % (desc, name, getattr(usage, name)))
    else:
        logdata = 'imcore_list Finished'
        logger(flog, logdata)

    print('Read back the results and check integrity: ', outfile)
    if not os.path.exists(outfile):
        print('File does not exist: ', outfile)
    try:
        hdulist = pyfits.open(outfile)
    except:
        traceback.print_exc(file=sys.stdout)

    logdata = 'Number of extensions: %d ' % len(hdulist)
    logger(flog, logdata)
    n_ext = len(hdulist)
    for ext in range(1, n_ext):
        table_stats(outfile, ext=ext)

    print('listdriven photometry completed')
def get_vista_pawprints(imagefile=None, filename=None, stagepath=None,
                        SelfTest=False):
    """
    Under development option to allow pawprint processing
    """
    # basic scp without retry
    print('filename: ', filename)
    pathname = os.path.dirname(filename)

    hdulist = pyfits.open(imagefile)
    header = hdulist[1].header
    print()
    print('PROV files:')
    #print(header['PROV0000'])
    list = [header['PROV0001']]
    list.append(header['PROV0002'])
    list.append(header['PROV0003'])
    list.append(header['PROV0004'])
    list.append(header['PROV0005'])
    list.append(header['PROV0006'])
    #print(header['PROV0001'])
    #print(header['PROV0002'])
    #print(header['PROV0003'])
    #print(header['PROV0004'])
    #print(header['PROV0005'])
    #print(header['PROV0006'])
    #print()
    print(list)
    #key = raw_input("Enter any key to continue: ")

    for image in list:
        filename = pathname + '/' + image
        command = 'scp -i ~/.ssh/id_dsa_nk ' \
            + ' ' + host + filename \
            + ' ' + stagepath + '/. '
        logdata = command
        logger(flog, logdata)
        trace = traceback.extract_stack()[-1]
        print(os.path.basename(trace[0]), ' line :', str(trace[1]))
        #result = os.popen(command)
        result = subprocess.Popen(command)

        # create the confidence map filename from the image filename
        confname = filename[:-4] + '_conf.fit'
        command = 'scp -i ~/.ssh/id_dsa_nk ' \
            + ' ' + host + confname \
            + ' ' + stagepath + '/. '
        logdata = command
        logger(flog, logdata)
        trace = traceback.extract_stack()[-1]
        print(os.path.basename(trace[0]), ' line :', str(trace[1]))
        #result = os.popen(command)
        result = subprocess.Popen(command)

    if SelfTest:
        catfile = files[0][0:-4] + '_cat.fits'
        print('SelfTest catalogue file: ' + catfile)
        command = 'time scp -i ~/.ssh/id_dsa_nk ' \
            + ' ' + host + catfile \
            + ' ' + stagepath + '/. '
        logdata = command
        logger(flog, logdata)
        if debug:
            print(command)
        #result = os.popen(command)
        result = subprocess.Popen(command)
        while True:
            line = result.readline()
            if line == "":
                break
            logdata = line
            logger(flog, logdata)
            print(line,)
            if debug:
                key = raw_input("Enter any key to continue: ")
        print('SelfTest File transferred: ' + catfile)
        #key = raw_input("Enter any key to continue: ")

    return list
def get_file(host=None, infile=None, transport='scp'):
    """
    scp a file from a remote host; this could also be http, etc.
    """
    # build the scp command; note the spaces between each parameter
    # use -v to debug the scp/ssh
    #cmd = 'scp -v -i ~/.ssh/id_dsa_nk '
    scp_verbose = '-v'
    scp_verbose = ''
    # the ssh key location could be in cfg
    command = 'time scp ' + scp_verbose + ' -i ~/.ssh/id_dsa_nk ' \
        + ' ' + host + infile \
        + ' ' + stagepath + '/. '
    logdata = command
    logger(flog, logdata)

    itry = 0
    Transfered = False
    trace = traceback.extract_stack()[-1]
    print(os.path.basename(trace[0]), ' line :', str(trace[1]))
    while (itry < iretry_max) and not Transfered:
        itry = itry + 1
        if debug:
            print(command)
        #result = os.popen(command)
        #help(result)
        result = subprocess.Popen(command, stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE, shell=True)
        output, errors = result.communicate()
        #result = subprocess.check_output(command)
        #help(result)
        print('subprocess output: ', output)
        print('subprocess errors: ', errors)
        #while True:
        #    line = errors.readline()
        #    if line == "":
        #        break
        #    logdata = line
        #    logger(flog, logdata)
        #    print(line,)
        #    if debug: key = raw_input("Enter any key to continue: ")

        # check that the file was transferred
        ipos = infile.rfind('/')
        outfile = stagepath + infile[ipos + 1:]
        if os.access(outfile, os.F_OK):
            Transfered = True
        if not os.access(outfile, os.F_OK):
            scpfailurefile = outfile + '.scpfailure'
            scpfailurefileh = open(scpfailurefile, 'wt')
            logdata = 'WARNING: file NOT transferred: %s' % infile
            logger(flogerr, logdata)
            Transfered = False
            delay = delaytime * (itry * itry)
            logdata = 'WAITING: %s seconds' % delay
            logger(flog, logdata)
            time.sleep(delay)
            continue
def search_catalogue(filename=None, listfile=None, outpath=None, radius=2.0):
    """
    """
    print('listfile: ', listfile)
    table = Table.read(listfile, format='ascii')
    table.pprint()
    ralist = table['col1']
    declist = table['col2']

    catfile = filename[0:-4] + '_cat.fits'
    logdata = 'Process catalogue file: %s' % (catfile)
    logger(flog, logdata)

    deltatime = time.time()

    ipos = filename.rfind('/')
    outfile = outpath + filename[ipos + 1:-4] + '_search.fits'
    logdata = 'Output file: %s' % outfile
    logger(flog, logdata)
    if os.path.exists(outfile):
        print('Skipping since outfile already exists for %s.' % (outfile))
        return

    lockfile = outfile + '.lock'
    print('lockfile: ', lockfile)
    if os.path.exists(lockfile):
        logdata = 'Skipping since lockfile exists: ' + lockfile
        logger(flog, logdata)
        logdata = "Total elapsed time %.3f seconds" % (time.time() - starttime)
        logger(flog, logdata)
        return

    # create lockfile
    lkfile = open(lockfile, 'wt')
    logdata = "Create lockfile %s" % lockfile
    logger(flog, logdata)
    # write pid and hostname into the lockfile
    lkfile.write(strftime("%Y-%m-%dT%H-%M-%S", gmtime()))
    lkfile.write(':pid: ' + str(pid) + '\n')
    lkfile.flush()

    #lockfile = listfile + '.lock.' + str(pid)
    #logdata = "Lockfile: " + lockfile
    #logger(flog, logdata)
    # use flock module
    #lock = flock(lockfile, True).acquire()

    if os.access(listfile, os.F_OK):
        # Read the list file, skipping comment lines
        records = [item for item in open(listfile) if item[0] != '#']
        numSources = len(records)
        logdata = 'Number of sources in listfile: %d' % numSources
        logger(flog, logdata)

    if not os.access(listfile, os.F_OK):
        logdata = 'List file: %s' % listfile
        logger(flog, logdata)
        logdata = 'List file problem'
        logger(flog, logdata)
        key = raw_input("Enter any key to continue: ")

    get_file(host=host, infile=catfile, transport='scp')

    logdata = "Delta elapsed time %.3f seconds" % (time.time() - deltatime)
    logger(flog, logdata)

    if not DryRun:
        logdata = 'Start processing the data'
        logger(flog, logdata)
        if debug:
            trace = traceback.extract_stack()[-1]
            print(os.path.basename(trace[0]), ' line :', str(trace[1]))
            key = raw_input("Debug: Enter any key to continue: ")

        ipos = filename.rfind('/')
        catfile = stagepath + catfile[ipos + 1:]
        result = srlib.cat_cal(catfile, ralist, declist, 1, radius=radius)
        if result is not None:
            result.write(outfile)
        print('catalogue search completed')
        #key = raw_input("Enter any key to continue: ")

    print('args.cache: ', args.cache)
    if os.path.exists(catfile) and not args.cache:
        print('Deleting data files used')
        print('Remove the cat file:' + catfile)
        try:
            os.remove(catfile)
        except OSError as e:
            logdata = "OS error({0}): {1}".format(e.errno, e.strerror)
            logger(flogerr, logdata)
            logdata = "error removing catfile %s " % catfile
            logger(flogerr, logdata)
            pass
cipher = DEFAULT_CH_CIPHER_SUITES_NAME
cipher_value = DEFAULT_CH_CIPHER_SUITES_VALUE

if (host is None) or (port is None) or (config_file is None):
    usage()
    sys.exit(2)

#
# set logger and config objects
#
if (log_file is None) or (config_file is None):
    print("Invalid log file/config file specified")
    usage()
    sys.exit(1)

logger = logger(log_file)
if logger.read_error == 1:
    print("Unable to open log file for writing")
    sys.exit(1)

config = config(config_file)
config.parse_and_validate()
if config.read_error == 1:
    logger.toboth("Unable to open config file for reading")
    sys.exit(1)
if config.config_status == CONFIG_INVALID:
    logger.toboth("Config file has some lines in an invalid format, please check")
    sys.exit(1)
if config.valid_lines == 0:
    logger.toboth("No valid lines in config file")
# initializes the logger
from logger import *

logger()
    return returnDict


def checkusername(username):
    if not username:
        return ''
    if "\\" in username['username']:
        return "domain_user"
    else:
        return ''


if __name__ == "__main__":
    logging = logger()
    logger = logging.get_logger('ldap_auth')
    callname = sys.argv[1]
    dictin = readinputs()
    # find out if we are dealing with domain\username or just a username
    usertype = checkusername(dictin)
    userinfo = usertools.usertools(usertype)
    if usertype == "domain_user":
        DOMAIN_USER = True
    logger.info('method "%s" called' % (callname))
def process_image(filename=None, files=None, outpath=None):
    """
    refactoring in progress
    """
    deltatime = time.time()

    logdata = 'Image filename: ' + filename
    logger(flog, logdata)

    catfile = filename[0:-4] + '_cat.fits'
    logdata = 'Catalogue file: %s' % catfile
    logger(flog, logdata)

    confname = filename[:-4] + '_conf.fit'
    if verbose:
        logdata = 'Confidence map: %s' % confname
        logger(flog, logdata)

    # this is duplicated below!
    # find the first file; needs to be refactored since only one file is input
    #if SelfTest:
    #    ipos1 = catfile.rfind('/')
    #    ipos2 = catfile.rfind('.fits')
    #    outfile = outpath + catfile[ipos1+1:ipos2] + '_listdriven.fits'
    #    if os.path.exists(outfile):
    #        logdata = 'List-driven data already exists for %s.' % (outfile)
    #        logger(flog, logdata)
    #        return

    if not SelfTest:
        # strip the path from the filename
        ipos = filename.rfind('/')
        listfile = filename[ipos + 1:] + '.radec'
        outfile = outpath + listfile + '_listdriven.fits'
        logdata = 'Output file: %s' % outfile
        logger(flog, logdata)
        listfile = listpath + listfile
        logdata = 'Listfile: %s' % listfile

    if SelfTest:
        logdata = 'Running self test regression using imcore catalogue'
        logger(flog, logdata)
        catfile = filename[0:-4] + '_cat.fits'
        logdata = 'catalogue file: %s' % catfile
        logger(flog, logdata)
        ipos1 = catfile.rfind('/')
        ipos2 = catfile.rfind('.fits')
        outfile = outpath + catfile[ipos1 + 1:ipos2] + '_listdriven.fits'
        print('outfile: ', outfile)
        if os.path.exists(outfile):
            print('List-driven data already exists for %s.' % (outfile))
            return

    lockfile = outfile + '.lock'
    print('lockfile: ', lockfile)
    if os.path.exists(lockfile):
        logdata = 'Skipping since lockfile exists: ' + lockfile
        logger(flog, logdata)
        logdata = "Total elapsed time %.3f seconds" % (time.time() - starttime)
        logger(flog, logdata)
        return

    # create lockfile
    lkfile = open(lockfile, 'wt')
    logdata = "Create lockfile %s" % lockfile
    logger(flog, logdata)
    # write pid and hostname into the file
    lkfile.write(strftime("%Y-%m-%dT%H-%M-%S", gmtime()))
    lkfile.write(':pid: ' + str(pid) + '\n')
    lkfile.flush()

    # another method
    #lockfile = listfile + '.lock.' + str(pid)
    #logdata = "Lockfile: " + lockfile
    #logger(flog, logdata)
    # use flock module
    #lock = flock(lockfile, True).acquire()

    if not SelfTest and os.access(listfile, os.F_OK):
        # Read the list file, skipping comment lines
        records = [item for item in open(listfile) if item[0] != '#']
        numSources = len(records)
        logdata = 'Number of sources in listfile: %d' % numSources
        logger(flog, logdata)

    if not SelfTest and not os.access(listfile, os.F_OK):
        logdata = 'List file: %s' % listfile
        logger(flog, logdata)
        logdata = 'List file problem'
        logger(flog, logdata)
        key = raw_input("Enter any key to continue: ")

    get_remotefile(filename=filename)
    get_remotefile(filename=confname)

    # check that the files were transferred
    ipos = confname.rfind('/')
    confmapfile = stagepath + confname[ipos + 1:]
    if not os.access(confmapfile, os.F_OK):
        n_errors = n_errors + 1
        logdata = 'WARNING: confmap file NOT transferred: %s' % confmapfile
        logger(flog, logdata)
        logger(flogerr, logdata)
        if os.path.exists(lockfile):
            logdata = "Delete lockfile %s " % lockfile
            logger(flog, logdata)
            os.remove(lockfile)

    if args.pawprints:
        # read the file to determine the constituent pawprints
        get_vista_pawprints(imagefile=imagefile, filename=filename,
                            stagepath=stagepath)

    if SelfTest:
        print('SelfTest/Search catalogue file: ' + catfile)
        if os.path.exists(catfile):
            logdata = 'catalogue already exists %s.' % (catfile)
            logger(flog, logdata)
        if not os.path.exists(catfile):
            command = 'time scp -i ~/.ssh/id_dsa_nk ' \
                + ' ' + host + catfile \
                + ' ' + stagepath + '/. '
            logdata = command
            logger(flog, logdata)
            if debug:
                print(command)
            result = subprocess.Popen(command, stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE, shell=True)
            output, errors = result.communicate()
            #result = os.popen(command)
            #result = subprocess.Popen(command)
            #while True:
            #    line = result.readline()
            #    if line == "":
            #        break
            #    logdata = line
            #    logger(flog, logdata)
            #    print(line,)
            #    if debug: key = raw_input("Enter any key to continue: ")
            print('SelfTest catalogue file transferred: ' + catfile)

        ipos = catfile.rfind('/')
        catfile = stagepath + catfile[ipos:]
        if os.access(catfile, os.F_OK):
            print('catfile transferred OK: ', catfile)
        if not os.access(catfile, os.F_OK):
            print('WARNING: catfile NOT transferred: ', catfile)
            key = raw_input("Enter any key to continue: ")
        listfile = catfile

    logdata = "Delta elapsed time %.3f seconds" % (time.time() - deltatime)
    logger(flog, logdata)

    if not DryRun:
        logdata = 'Start processing the image data'
        logger(flog, logdata)
        if debug:
            trace = traceback.extract_stack()[-1]
            print(os.path.basename(trace[0]), ' line:', str(trace[1]))
            key = raw_input("Debug: Enter any key to continue: ")

        imcore_list_run(args=args, imagefile=imagefile, confmapfile=confmapfile,
                        listfile=listfile, outfile=outfile)

        print('listdriven photometry completed')
        #key = raw_input("Enter any key to continue: ")

    print('args.cache: ', args.cache)
    if os.path.exists(imagefile) and not args.cache:
        print('Deleting data files used')
        print('Remove the image file:' + imagefile)
        try:
            os.remove(imagefile)
        except OSError as e:
            logdata = "OS error({0}): {1}".format(e.errno, e.strerror)
            logger(flogerr, logdata)
            logdata = "error removing imagefile %s " % imagefile
            logger(flogerr, logdata)
            pass

    if os.path.exists(confmapfile) and not args.cache:
        print('Remove the confidence map:' + confmapfile)
        try:
            os.remove(confmapfile)
        except:
            logdata = "error removing confmapfile %s " % confmapfile
            logger(flogerr, logdata)
            pass

    if SelfTest:
        if os.path.exists(catfile) and not args.cache:
            print('Remove the catalogue fits file:' + catfile)
            try:
                os.remove(catfile)
            except:
                logdata = "error removing catalogue file %s " % catfile
                logger(flogerr, logdata)
                pass

    if os.path.exists(lockfile):
        logdata = "Delete lockfile %s " % lockfile
        logger(flog, logdata)