def get_data():
    ''' Extracting data from application paths given on the command line.

    Reads application paths from sys.argv, strips any surrounding shell
    quotes, extracts features for each application, and prints the
    predicted malware category and probability per application.
    '''
    args = sys.argv[1:]
    # Strip surrounding quotes that shells sometimes leave on paths.
    # Guard against empty strings to avoid an IndexError on args[i][0].
    for i, a in enumerate(args):
        if a and (a[0] == "'" or a[0] == '"'):
            args[i] = a[1:-1]
    if not args:
        # Original code raised NameError here (`result` never bound);
        # bail out explicitly when no applications were supplied.
        print('No applications supplied.')
        return
    tupled_data = []
    columns = None
    for path in args:
        result = extract_info(path)
        if columns is None:
            # Column names come from the first extraction result.
            columns = list(result.keys())
        tupled_data.append(list(result.values()))
    test_df = pd.DataFrame(data=tupled_data, columns=columns)
    classes, prob, pred = check_malware(test_df)
    print('--------------------------------------')
    print('Application => Category => Probability')
    print('--------------------------------------')
    for i in range(test_df.shape[0]):
        # for ML model
        # print(app[i], ' => ', classes[i], ' => ', prob[i][pred[i]])
        # for DL model
        print(args[i], ' => ', classes[i], ' => ', prob[i][0])
def traced_extract_info(*args: Any, **kwargs: Any) -> Any:
    """Invoke extract_info under trace logging and sanity-check the trace.

    The log emitted during extraction is tokenised and replayed through
    ``graph``; when the walk ends in a terminal/error state, the result is
    expected to have been flagged as incorrect.
    """
    print(strategy_names)
    logger.new_stream()
    extraction = extract_info(*args, stages=stages, **kwargs)
    detected_types = decide_entry_type(extraction)
    tokens = logger.stream.getvalue().split()
    end_state = walk_graph(tokens, initial_state, graph)
    reached_error = (0 in end_state) or (end_state is final_state)
    if reached_error:
        assert EntryType.incorrect in detected_types
    return extraction
def main(issue_id: str, type_of_search: str, name: str) -> None:
    """Fetch data for a Sentry issue and dump its unique events to a CSV file.

    Queries the Sentry API for the given issue, builds the set of unique
    actions for it, and writes each action as one CSV row.

    :param issue_id: Sentry issue number.
    :param type_of_search: currently ``order_id`` or ``uid``; this value
        appears in the first column of the file.
    :param name: output file name.
    """
    api_payload = get_data(issue_id)
    for event in extract_info(type_of_search, api_payload):
        csv_creator(name, list(event))
def __call__(self, requirements, request):
    """
    When run as a function, objects of this class run the implication
    algorithm. Depending on the settings, either the Prolog or Python
    version will be used.

    @param requirements: list of required attributes (strings).
    @param request: dictionary mapping the basic requirement names to
        their current values.
    """
    restaurants = extract_info().extract_info("data/restaurant_info.csv", request)
    # No requirements: every restaurant matching the request passes trivially.
    if not requirements:
        return restaurants
    if self.prolog:
        return self.implications_prolog(requirements, request.values(), restaurants)
    return self.implications_python(requirements, restaurants)
def __init__(self):
    """Set up the classifier, extractors, and (cached) MLP model artifacts."""
    self.algorithm = multi_layer_perceptron()
    self.eInfo = extract_info()
    self.kAlgorithm = keyword_algorithm()
    self.configurations = extract().extract_settings()
    model_path = "data/mlp_model.pkl"
    if os.path.exists(model_path):
        # Reuse the previously trained model instead of retraining.
        with open(model_path, 'rb') as f:
            self.mlp, self.id_dict, self.scaler = pickle.load(f)
    else:
        # Train from scratch, then cache the artifacts for next startup.
        self.mlp, self.id_dict, self.scaler = self.algorithm.mlp("data/dialog_acts.dat")
        with open(model_path, 'wb') as f_pickle:
            pickle.dump((self.mlp, self.id_dict, self.scaler), f_pickle)
    # The configuration may override the default classifier with a decision tree.
    if self.configurations["CURRENT_CLASSIFIER"]["value"] == "dt":
        self.algorithm = decision_tree()
import glob
import os
import subprocess
import sys

from ultimateGradeCal import getCollabs
from extract_info import extract_info

# Usage: python collabsEmail.py <csv> <labSec> <labNo>
if len(sys.argv) < 4:
    print("Usage: python collabsEmail.py csv labSec labNo")
    # sys.exit instead of the site-injected exit(): exit() is not
    # guaranteed to exist (e.g. under `python -S`).
    sys.exit(1)

csvFileName = sys.argv[1]
# Normalise short forms: section "12" -> "AY12", lab number "7" -> "07".
labSec = sys.argv[2] if len(sys.argv[2]) == 3 else "AY" + sys.argv[2]
labNo = sys.argv[3] if len(sys.argv[3]) == 2 else "0" + sys.argv[3]

netidList = list(extract_info(csvFileName, labSec))
subject = "No-Reply: Lab" + labNo + " Feedback"
content = "This is a test send for lab" + labNo + ". Please reply to my email address [email protected] if you have questions."

sentTrack = {}
collabSendingList = []
for netid in netidList:
    sentTrack[netid] = False

collabs = getCollabs(labSec[-1], labNo)
path = "/class/cs101/etc/sxns/" + labSec

# Generate feedbacks
os.chdir(path)
for submitter in collabs:
    filePath = glob.glob(path + "/feedback/" + submitter + "/lab" + labNo + "/*.html")
    if len(filePath) == 0:
        # No feedback HTML yet for this submitter: generate it quietly.
        subprocess.call([
            'nbgrader', 'feedback', '--assignment=lab' + labNo,
            '--student=' + submitter, '--quiet'
        ])
# Send to the submitters
if args.device is not None:
    print(" ")
    print(" ** DEVICE selected using the --device command line"
          " option. Only do this if you know what you are doing!")
    print(" ** You should be using the CUDA_VISIBLE_DEVICES and/or"
          " GPU_DEVICE_ORDINAL environment variables instead.")

if args.minimal_gcd:
    print(" ")
    # Fixed: adjacent string literals were previously concatenated without
    # a separating space, printing "butinstead" and "fewercollision".
    print(" ** You chose to not use a standard IceCube GCD file but"
          " instead to create a trivial geometry from scratch.")
    print(" ** This geometry only has 24 DOMs, so there are fewer"
          " collision checks.")
    print(" ** This usually means propagation is faster, but unrealistic."
          " Might differ from GPU type to GPU type.")

# Derive both output file names from the common base path.
args.xml_file = args.outbase + ".xml"
args.i3_file = args.outbase + ".i3.bz2"

run_test(args)

# Print benchmark info out if companion module is available
try:
    from extract_info import extract_info
except ImportError:
    print("Could not import extract_info companion script for printing"
          " results.")
else:
    extract_info(args.xml_file)
def manager(self, file, name, command):
    '''
    Given the file, name of the entity, and command the user entered, this function figures out whether the
    search term is a band or person, was in the training or outside of it, and whether it has a members or
    associated acts section in the info box. If the entity has members or associated acts, it grabs those names.
    Depending on this set of attributes, it calls the appropriate function in extract_info.py. This is necessary
    in order for the program to adequately filter extracted relationships and match them based on known results
    when possible.
    :param file: the pathname to the file of interest
    :type file: string
    :param name: the name of the entity of interest
    :type name: string
    :param command: the first word of input, which is either 'person' or 'band'
    :type command: string
    :return: does not return anything
    '''
    from scrape_page import scrape_page
    from extract_info import extract_info

    # Local names chosen so they do not shadow the imported callables.
    page = scrape_page(file)
    page.populate_soup()
    soup = page.get_soup()

    # Figure out three attributes: person or band, in training or outside of it,
    # and whether there is a members or associated acts section.
    if command == 'band':
        self.is_band = True
        page.populate_members()
        if name in self.training_bands:
            self.in_training = True
        # Populate a list of members if that section exists in the info box.
        # (The original duplicated this in both branches of the training check.)
        self.members = page.get_members()
        if self.members:
            self.has_members_or_acts = True
    else:
        page.populate_acts()
        if name in self.training_people:
            self.in_training = True
        # Populate a list of associated acts if that section exists in the info box.
        self.acts = page.get_acts()
        if self.acts:
            self.has_members_or_acts = True

    # Special case, populate associated acts with known list of potential bands
    # if the search term was a person in training without acts.
    if not self.is_band and self.in_training and not self.has_members_or_acts:
        self.acts = self.training_bands

    # Make an instance for extract_info.py and use the ner model to flag entities.
    extractor = extract_info(name, self.mitie_dir, self.mitie_ner, soup,
                             self.is_band, self.members, self.acts)
    extractor.populate_entities()

    # Depending on the search term attributes, call different functions in
    # extract_info.py. These filter the extracted relationships differently,
    # using known information to get better results when possible.
    # NOTE: the original person branch repeated the same condition twice
    # ("not band and has acts" or-ed with itself); the dead duplicate is removed.
    if self.is_band and (self.has_members_or_acts or self.in_training):
        extractor.filter_entities_by_score()
        extractor.band_in_or_outside_training_with_members()
    elif not self.is_band and self.has_members_or_acts:
        extractor.filter_entities_by_score()
        extractor.person_in_or_outside_training_with_acts()
    elif not self.is_band and self.in_training and not self.has_members_or_acts:
        extractor.filter_entities_by_score()
        extractor.person_in_training_without_acts()
    elif self.is_band and not self.in_training and not self.has_members_or_acts:
        extractor.filter_entities_by_search()
        extractor.band_outside_training_without_members()
    else:
        extractor.filter_entities_by_search()
        extractor.person_outside_training_without_acts()