def conn(self):
    """Open and return a new MySQL connection using credentials from static()."""
    cfg = static()
    return mysql.connect(
        host=cfg.host,
        user=cfg.user,
        passwd=cfg.password,
        db=cfg.database,
    )
def fetch_course_names(self):
    """Scrape subject abbreviations and display names for every campus.

    For each campus id in static().campusid, fetches the basic-search page
    and parses the <select id="subject"> options.

    Returns:
        list: ``[abbreviation, full_name, campus_code]`` triples, where
        ``campus_code`` is the value produced by ``campusid_constructor``.
    """
    static_ = static()
    subjectnames = []
    for campus in static_.campusid:
        campus_code = self.campusid_constructor(len(str(campus)), campus)
        url = 'https://eservices.minnstate.edu/registration/search/basic.html?campusid={}'.format(campus_code)
        subjectpage = requests.get(url)
        parsedpage = BeautifulSoup(subjectpage.text, 'html.parser')
        courses = parsedpage.find('select', id='subject').find_all('option')
        for course in tqdm(courses):
            abbrev = course['value']
            if not abbrev:
                # Skip the placeholder <option> with an empty value.
                continue
            # Option text looks like "Name (ABBREV)"; keep the name part.
            name = course.contents[0].split('(')[0].strip(' \n')
            subjectnames.append([abbrev, name, campus_code])
    return subjectnames
def main():
    """Read the pcap path from config.cfg, analyse it, and write results to PDF."""
    # Python 3 rename of the Python 2 `ConfigParser` module.
    import configparser
    config = configparser.ConfigParser()
    config.read('config.cfg')
    # py2 code passed a third positional `raw=0`; in py3 that is the default.
    pcapfile = config.get('file', 'pcapfile')
    # `with open(...)` replaces the removed `file()` builtin and guarantees
    # the capture file is closed even if parsing raises.
    with open(pcapfile, "rb") as fpcap:
        print("analysis the pcap...s")
        http_datas = read_pcap(fpcap)
    print("statics the results..")
    top_hosts, hosts = static(http_datas)
    # write result in pdf
    print("write the result in pdf..")
    print_results(top_hosts, hosts)
def main():
    """Read the pcap path from config.cfg, analyse it, and write results to PDF."""
    # Python 3 rename of the Python 2 `ConfigParser` module.
    import configparser
    config = configparser.ConfigParser()
    config.read('config.cfg')
    # py2 code passed a third positional `raw=0`; in py3 that is the default.
    pcapfile = config.get('file', 'pcapfile')
    # `with open(...)` replaces the removed `file()` builtin and guarantees
    # the capture file is closed even if parsing raises.
    with open(pcapfile, "rb") as fpcap:
        print("analysis the pcap...s")
        http_datas = read_pcap(fpcap)
    print("statics the results..")
    top_hosts, hosts = static(http_datas)
    # write result in pdf
    print("write the result in pdf..")
    print_results(top_hosts, hosts)
from mdutils import MdUtils from static import static from util import str2time target_list = ["casbin", "casdoor"] start_time = "2021-01-10T00:50:59Z" end_time = "2021-05-17T00:50:59Z" data = static(start_time, end_time) info = data.get_info() open_issues = [] open_prs = [] review_prs = [] comment_issues = [] for node in info['data']['user']['contributionsCollection'][ "issueContributions"]['nodes']: repo = node["issue"]["repository"]["nameWithOwner"] for target in target_list: if repo.find(target) != -1: open_issues.append(node) break for node in info['data']['user']['contributionsCollection'][ "pullRequestContributions"]['nodes']: repo = node["pullRequest"]["repository"]["nameWithOwner"] for target in target_list: if repo.find(target) != -1:
#!/usr/bin/env python
"""Driver: run static, dynamic, and memory analysis on the given inputs."""
import sys
import static
import dynamic
import memory

if __name__ == "__main__":
    # The calls below read sys.argv[1..3], so require all three arguments
    # up front instead of crashing with IndexError part-way through.
    if len(sys.argv) < 4:
        print("Enter your file name as argument")
        sys.exit()
    print("Performing static analysis")
    static.static(sys.argv[1])
    dynamic.dynamic(sys.argv[1], sys.argv[2])
    memory.memory(sys.argv[3])
def fetch_course_data(self):
    # Scrape full course-detail records for every campus/subject pair.
    # Builds search URLs from static() config plus subject_name_data.json,
    # fetches each course page, and parses the summary table, meeting
    # info, tuition, and description.
    # Returns: list of per-course lists with 23 fields (see `data` below).
    static_ = static()
    course_data, url, uri = [], [], []
    campuses = static_.campusid
    subject_name_data = pd.read_json("./data/subject_name_data.json")
    size = (subject_name_data.abbrev).size
    # Pair every campus with each subject row whose campus_id matches it.
    for campus in campuses:
        for x in range(size):
            if campus == int(subject_name_data['campus_id'][x]):
                urls = self.makesearchpageurl(
                    str(campus), str(static_.yrtr),
                    subject_name_data['abbrev'][x])
                url.append([urls, campus])
    print("Fetching course urls...")
    uri = self.create_uri(url, static_.yrtr)
    print("Fetching course data...")
    for urls, campus in tqdm(uri):
        try:
            html_doc = requests.get(urls).content
            parsed = BeautifulSoup(html_doc, "html.parser")
            # Course summary cells: [1]=id ... [11]=instructor.
            table = parsed.find('table', class_='myplantable').find('tbody', class_='course-detail-summary') \
                .find_all('td')
            id = table[1].text.strip()  # NOTE(review): shadows the builtin `id`
            subj = table[2].text.strip()
            number = table[3].text.strip()
            sec = table[4].text.strip()
            title = table[5].text.strip()
            dates = table[6].text.strip()
            day = table[7].text.strip()
            times = table[8].text.strip()
            crd = table[9].text.strip()
            status = table[10].text.strip()
            instructor = table[11].text.strip()
            yrtr = static_.yrtr
            # Saved before `campus` is overwritten by the parsed campus name below.
            campus_id = campus
            # Sorting term data
            # Presumably the 5th char of yrtr encodes the term
            # (1=summer, 3=fall, 5=spring) — TODO confirm; note the stray
            # apostrophe in "j-term'" which is emitted verbatim.
            find_term = lambda a: "summer" if a == '1' else (
                "fall" if a == '3' else ("spring" if a == '5' else "j-term'"))
            term = find_term(str(yrtr)[4:])
            # Sorting out year data
            # NOTE(review): these conditions mix a[:4] (4-digit year), a[4:]
            # (term digit), and the whole string a; comparing a[:4] or a to
            # '1'/'5' can never match, so the net effect is int(a[:4]) - 1 on
            # every reachable branch — confirm the intended logic.
            find_year = lambda a: int(a[:4])-1 if a[:4] == '1' \
                else (int(a[:4])-1 if a[4:] == '3' else (a[:4] if a == '5' else int(a[:4])-1))
            year = find_year(str(yrtr))
            # Second meetingTable holds "Offered through / Campus / Location" rows.
            offered_through = parsed.find_all('table', class_='meetingTable')[1].find_all('tr')[0] \
                .find_all('td')[0].text.replace('Offered through:', '').strip()
            campus = parsed.find_all('table', class_='meetingTable')[1].find_all('tr')[1] \
                .find_all('td')[0].text.replace('Campus:', '').strip()
            location = parsed.find_all('table', class_='meetingTable')[1].find_all('tr')[1] \
                .find_all('td')[1].text.replace('Location:', '').strip()
            tuition = parsed.find('table', class_='plain fees')
            try:
                # Tuition rows: [0]=resident, [1]=non-resident, [2]=fees;
                # strip currency formatting to leave a bare number string.
                res_tuition = tuition.find_all('tr')[0].find(
                    'td').text.replace('$', '').replace(',', '').strip()
                non_res_tuition = tuition.find_all('tr')[1].find(
                    'td').text.replace('$', '').replace(',', '').strip()
                fees = tuition.find_all('tr')[2].find('td').text.replace(
                    '$', '').replace(',', '').strip()
                alldetails = parsed.find_all('div', class_='detaildiv')
                # Course description is the text node right after the last detail div.
                description = alldetails[-1].next_sibling.replace(
                    '\t', '').replace('\n', '').strip()
            except Exception as expt:
                # Best-effort: missing tuition/description falls back to 'N/A'.
                self.logger.warning("Error: {}".format(expt))
                res_tuition, non_res_tuition, fees, description = 'N/A', 'N/A', 'N/A', 'N/A'
            data = [
                id, subj, number, sec, term, year, title, dates, day, urls,
                times, crd, status, instructor, offered_through, campus,
                location, res_tuition, non_res_tuition, fees, yrtr,
                description, campus_id
            ]
            course_data.append(data)
        except Exception as expt:
            # A failed page is logged and skipped; the loop continues.
            self.logger.warning("Error: {}".format(expt))
    return course_data