def __init__(self):
    """Load the configuration and cache its compiler and substitution lists."""
    conf = Conf()
    self.conf = conf
    self.listOfComp = conf.getComp()
    self.listOfSub = conf.getSub()
    # scratch buffers for option tuples being edited in the GUI
    self.tmpOpt1, self.tmpOpt2 = [], []
def __init__(self, conf_json, model, optimizer=None, name=None):
    """Build and compile *model* from *conf_json* and load all datasets.

    Exits the process (code 1) if the STACK_OVER_FLOW_QA environment
    variable — the dataset root — is not set.

    :param conf_json: configuration source passed straight to ``Conf``
    :param model: model *class*; instantiated here with the Conf object
    :param optimizer: overrides the configured optimizer when given
    :param name: run name; defaults to ``<conf name>_<model class name>``
    """
    try:
        data_path = os.environ['STACK_OVER_FLOW_QA']
    except KeyError:
        print(
            "STACK_OVER_FLOW_QA is not set. Set it to your clone of https://github.com/mrezende/stack_over_flow_python"
        )
        sys.exit(1)
    self.conf = Conf(conf_json)
    self.model = model(self.conf)
    if name is None:
        self.name = self.conf.name() + '_' + model.__name__
        logger.info(f'Initializing Evaluator ...')
        logger.info(f'Name: {self.name}')
    else:
        self.name = name
    self.path = data_path
    self.params = self.conf.training_params()
    # explicit optimizer argument wins over the configured one
    optimizer = self.params['optimizer'] if optimizer is None else optimizer
    self.model.compile(optimizer)
    self.answers = self.load('answers.json')  # self.load('generated')
    self.training_data = self.load('training.json')
    self.dev_data = self.load('dev.json')
    self.eval_data = self.load('eval.json')
    # lazily-built caches (see vocab()/reverse_vocab())
    self._vocab = None
    self._reverse_vocab = None
    self._eval_sets = None
    # per-run score accumulators, joined in save_score()
    self.top1_ls = []
    self.mrr_ls = []
def start(self):
    """Resolve the report folder from the "local" config and prepare it.

    Falls back to ``<cwd>/log/`` when no folder is configured and
    guarantees a trailing slash before handing it to local_utils.
    """
    folder = Conf("local").get_string("report.folder")
    if not folder:
        folder = os.getcwd() + "/log/"
    if not folder.endswith("/"):
        folder = folder + "/"
    self.log_folder = folder
    local_utils.prepare_template(folder)
    local_utils.prepare_current_log_folder(folder)
def __init__(self):
    """Choose the reporter backend from the "remote" configuration.

    NOTE(review): this only appends to ``self.reporters`` — it assumes the
    list already exists (presumably created by a base class); confirm.
    """
    remote_enable = Conf("remote").get_string("enable")
    use_remote = remote_enable.lower() == "true"
    if use_remote:
        print("Enable remote Reporter")
        self.reporters.append(difido.RemoteReport())
    else:
        print("remote reporter disabled")
        self.reporters.append(difido.Console())
def pytest_collection_finish(session):
    """Pytest hook: runs once collection has been performed and modified.

    Prints the automation-version banner and opens the report suite with
    the description taken from the "remote" configuration.

    :param _pytest.main.Session session: the pytest session object
    """
    banner = "~~~~~~~~~~~~~~~~~~~~~~~~~`Automation Ver :: " + automation_ver + "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
    print(banner)
    suite_description = Conf("remote").get_string("description")
    reporter.start_suite(suite_description, testAttr)
    reporter.add_execution_properties("automation_version", automation_ver)
def __init__(self, host, port, name):
    """Stats holder for the node at *host*:*port* known as *name*.

    Disk/memory/CPU counters start at zero; presumably a monitoring loop
    refreshes them later (not visible here).
    """
    self.host = host
    self.port = port
    self.name = name
    # Fix: the original used Python-2-only long literals (``0l``), which are
    # a SyntaxError on Python 3. Plain ``0`` is equivalent — Python ints are
    # arbitrary precision, so no range is lost.
    self.lastTime = 0
    self.startTime = psutil.boot_time()
    self.totalDisk = 0
    self.freeDisk = 0
    self.usedDisk = 0
    self.percentDisk = 0
    self.totalMemory = 0
    self.usedMemory = 0
    self.availableMemory = 0
    self.percentMemory = 0
    self.cpuCount = 0
    self.cpuTotalPercent = 0
    # name -> worker/service registries, insertion-ordered
    self.availWorkers = OrderedDict()
    self.availServices = OrderedDict()
    self.workers = OrderedDict()
    self.services = OrderedDict()
    self.diskPath = Conf.getNodeDiskPath()
def __init__(self,host,port,name):
    """Stats holder for the node at *host*:*port* known as *name*.

    Disk/memory/CPU counters start at zero; presumably a monitoring loop
    refreshes them later (not visible here).
    """
    self.host = host
    self.port = port
    self.name = name
    # Fix: the original used Python-2-only long literals (``0l``), which are
    # a SyntaxError on Python 3. Plain ``0`` is equivalent — Python ints are
    # arbitrary precision, so no range is lost.
    self.lastTime = 0
    self.startTime = psutil.boot_time()
    self.totalDisk = 0
    self.freeDisk = 0
    self.usedDisk = 0
    self.percentDisk = 0
    self.totalMemory = 0
    self.usedMemory = 0
    self.availableMemory = 0
    self.percentMemory = 0
    self.cpuCount = 0
    self.cpuTotalPercent = 0
    # name -> worker/service registries, insertion-ordered
    self.availWorkers = OrderedDict()
    self.availServices = OrderedDict()
    self.workers = OrderedDict()
    self.services = OrderedDict()
    self.diskPath = Conf.getNodeDiskPath()
def start(self):
    """Open a remote execution on the server and register this machine.

    On any server failure sets ``self.enabled = False`` and returns early
    (best-effort reporting); on success leaves ``self.execution_id``,
    ``self.machine_id`` and ``self.retries`` set.
    """
    conf = Conf("remote")
    self.execution_properties = conf.get_dict("execution.properties")
    details = ExecutionDetails()
    details.description = conf.get_string("description")
    details.execution_properties = self.execution_properties
    try:
        self.execution_id = remote_utils.prepare_remote_execution(details)
        self.enabled = True
    # Fix: narrowed from a bare ``except:`` — a bare except also swallows
    # SystemExit/KeyboardInterrupt; best-effort disabling is kept.
    except Exception:
        self.enabled = False
        return
    machine = self.execution.get_last_machine()
    try:
        self.machine_id = remote_utils.add_machine(self.execution_id, machine)
    except Exception:
        self.enabled = False
        return
    self.retries = 10
def save_predict_results(self, conf_json):
    """Archive the trained weights and the prediction score file.

    Copies ``models/weights_epoch_<conf name>.h5`` into the archive base
    folder (creating its ``models`` subfolder) and moves
    ``results_conf.txt`` there.
    """
    conf = Conf(conf_json)
    weights_rel_path = f'models/weights_epoch_{conf.name()}.h5'
    os.makedirs(os.path.join(self.base_folder, 'models'), exist_ok=True)
    archived_weights = os.path.join(self.base_folder, weights_rel_path)
    self.copy(weights_rel_path, archived_weights)
    score_rel_path = 'results_conf.txt'
    archived_score = os.path.join(self.base_folder, score_rel_path)
    self.move(score_rel_path, archived_score)
def __init__(self, args):
    """Configure the loader from CLI *args*, then load and save env data.

    :param args: namespace with at least data/patient selection flags
        (``patient_id``, ``patient_test_date``, ``incident``, ``save_dir`` ...)
    :raises ValueError: if a patient id is given without a test date, or
        if ``args.incident`` is not one of 'all'/'UTI'/'Agitation'.
    """
    super(Data_loader, self).__init__()
    print(args)
    self.conf = Conf(args)
    self.args = args
    self.patient_id = args.patient_id
    self.patient_test_date = args.patient_test_date
    self.data = {}
    self.verbose = args.verbose
    self.save_per_patient = args.save_per_patient
    self.extract_incident = args.extract_incident
    self.save_dir = args.save_dir
    self.label_previous_day = args.label_previous_day
    if self.patient_id is not None and self.patient_test_date is None:
        raise ValueError('test date must be provided')
    # sensor-location groups, indexed by feature id
    self.env_feat_list = {
        0: ['Fridge'],
        1: ["living room", 'Lounge'],
        2: ['Bathroom'],
        3: ['Hallway'],
        4: ['Bedroom'],
        5: ['Kitchen'],
        6: ['Microwave', 'Toaster'],
        7: ['Kettle'],
    }
    if args.incident == 'all':
        self.incident = ['UTI symptoms', 'Agitation']
    elif args.incident == 'UTI':
        self.incident = ['UTI symptoms']
    elif args.incident == 'Agitation':
        self.incident = ['Agitation']
    else:
        # Fix: previously an unrecognised value silently left self.incident
        # unset and failed later with AttributeError; fail fast instead.
        raise ValueError(f'unknown incident type: {args.incident!r}')
    assert not (self.label_previous_day and self.args.extract_uti_phase
                ), 'only one of them can be True'
    path = self.conf.npy_data
    if self.save_dir is not None:
        path = path + '/' + self.save_dir
    save_mkdir(path)
    self.load_env()
    self.save_data()
def save_training_results(self, conf_json):
    """Copy the epoch weights for *conf_json* into the archive folder.

    (Plot archiving existed here once; only model weights are archived now.)
    """
    conf = Conf(conf_json)
    weights_rel_path = f'models/weights_epoch_{conf.name()}.h5'
    os.makedirs(os.path.join(self.base_folder, 'models'), exist_ok=True)
    destination = os.path.join(self.base_folder, weights_rel_path)
    self.copy(weights_rel_path, destination)
class Ctrl():
    """GUI controller: mediates between the Tk-based Gui and the Conf/Comp
    configuration model (compiler tests, substitutions, options)."""

    def __init__(self):
        self.conf = Conf()
        self.listOfComp = self.conf.getComp()
        self.listOfSub = self.conf.getSub()
        # scratch buffers for option tuples while a Comp is being edited
        self.tmpOpt1 = []
        self.tmpOpt2 = []

    @staticmethod
    def openfile(e=None):
        """Open a file chooser; if an entry widget *e* is given, put the
        chosen path into it."""
        from gui import Gui
        filename = Gui.exploreFile()
        if e!=None:
            Ctrl.setText(e, filename)

    @staticmethod
    def emptyText(e):
        """Clear the Tk entry widget *e*."""
        e.delete(0,tk.END)

    @staticmethod
    def setText(e, text):
        """Replace the content of entry widget *e* with *text*."""
        Ctrl.emptyText(e)
        e.insert(0, text)

    @staticmethod
    def checkRun(e, p, w):
        """Run the full pipeline (generate source, compile, VirusTotal scan)
        for the config file named in entry *e*, driving progressbar *p* on
        window *w*; shows the per-compilation scores in an info dialog."""
        from gui import Gui
        if not os.path.exists(e.get()):
            Gui.alertErr("Error", "Configuration file not found", w)
            return
        try:
            conf = e.get()
            ce = CompilationEngine(conf)
            m = Manipulator(conf)
            p['value'] = 20
            w.update_idletasks()
            m.generateSource()
            p['value'] = 40
            w.update_idletasks()
            ce.createExes()
            p['value'] = 60
            w.update_idletasks()
            complist = ce.getReport()
            vt = VirusTotal()
            p['value'] = 80
            w.update_idletasks()
            report = vt.getScore(complist)
            p['value'] = 100
            w.update_idletasks()
            # NOTE(review): ``str`` shadows the builtin here (kept as-is)
            str = ""
            i = 0
            for x,y in complist.items():
                str += "<{} : {}>\n".format(x,report[i])
                i+=1
            Gui.alertInfo("Result", str, w)
        except Exception as err:
            p['value'] = 0
            w.update_idletasks()
            Gui.alertErr("Error", err, w)

    @staticmethod
    def checkClear(e, p, w):
        """Clean the build artifacts for the config file named in entry *e*."""
        from gui import Gui
        if not os.path.exists(e.get()):
            Gui.alertErr("Error", "Configuration file not found", w)
            return
        try:
            conf = e.get()
            ce = CompilationEngine(conf)
            ce.clear()
            p['value'] = 100
            w.update_idletasks()
            Gui.alertInfo("Info", "Cleaning completed", w)
        except Exception as err:
            p['value'] = 0
            w.update_idletasks()
            Gui.alertErr("Error", err, w)

    def remFromListComp(self,l):
        """Delete the selected compiler entry from listbox *l* and the model."""
        from gui import Gui
        if not l.curselection():
            Gui.alertInfo("Info", "Select an element to delete it")
            return
        del(self.listOfComp[l.curselection()[0]])
        Gui.remFromListSelected(l)

    def remFromListSub(self,l):
        """Delete the selected substitution from listbox *l* and the model."""
        from gui import Gui
        if not l.curselection():
            Gui.alertInfo("Info", "Select an element to delete it")
            return
        del(self.listOfSub[l.curselection()[0]])
        Gui.remFromListSelected(l)

    def remFromListOpt(self,l, index, root):
        """Delete the selected option from listbox *l*; *index* picks which
        scratch buffer (0 -> tmpOpt1, otherwise tmpOpt2)."""
        from gui import Gui
        if not l.curselection():
            Gui.alertInfo("Info", "Select an element to delete it", root)
            return
        if index == 0 :
            del(self.tmpOpt1[l.curselection()[0]])
        else:
            del(self.tmpOpt2[l.curselection()[0]])
        Gui.remFromListSelected(l)

    def fillCompField(self, e, l, list):
        """Populate the edit widgets *e* (name, path) and option listboxes *l*
        from the compiler entry selected in listbox *list*."""
        from gui import Gui
        inex = self.listOfComp[list.curselection()[0]]
        Ctrl.setText(e[0], inex.getName())
        Ctrl.setText(e[1], inex.getPath())
        if len(inex.getOpt1()) > 0:
            for el in inex.getOpt1():
                Gui.addElToList(l[0], el)
                self.tmpOpt1.append(el)
        if len(inex.getOpt2()) > 0:
            for el in inex.getOpt2():
                Gui.addElToList(l[1], el)
                self.tmpOpt2.append(el)

    def checkSub(self, top, e, list):
        """Validate and add a (pattern, replacement) substitution pair from
        entries *e*; closes dialog *top* on success."""
        from gui import Gui
        if len(e[0].get()) != 0 and len(e[1].get()) != 0:
            self.conf.addToSub((e[0].get(),e[1].get()))
            Gui.addElToList(list, (e[0].get(),e[1].get()))
            Gui.destroyTop(top)
        else:
            Gui.alertErr("Error", "Both fields are obligatory", top)

    def checkOption(self, e, l, index, root):
        """Build an option tuple (name, values) from entries *e* and append it
        to the buffer selected by *index*; e[2] is an optional separator used
        to split e[1] into the value list. Clears the entries afterwards."""
        from gui import Gui
        if len(e[0].get()) > 0:
            t = None
            if len(e[1].get()) > 0:
                if len(e[2].get()) > 0:
                    t = (e[0].get(), e[1].get().split(e[2].get()))
                    Gui.addElToList(l, t)
                else:
                    t = (e[0].get(), e[1].get().split(None))
                    Gui.addElToList(l, t)
            else:
                t = (e[0].get(),[])
                Gui.addElToList(l, t)
            if index == 0:
                self.tmpOpt1.append(t)
            else:
                self.tmpOpt2.append(t)
        else:
            Gui.alertErr("Error", "Option name is obligatory", root)
        Ctrl.emptyText(e[0])
        Ctrl.emptyText(e[1])
        Ctrl.emptyText(e[2])

    def checkComp(self, top, e, list):
        """Validate the edited compiler (name + path obligatory), attach the
        buffered options, append it to the model and close dialog *top*."""
        from gui import Gui
        comp = Comp()
        if len(e[0].get()) != 0 and len(e[1].get()) != 0:
            comp.setName(e[0].get())
            comp.setPath(e[1].get())
            for t in self.tmpOpt1:
                comp.addOpt1(t)
            for t in self.tmpOpt2:
                comp.addOpt2(t)
            self.tmpOpt1 = []
            self.tmpOpt2 = []
            self.listOfComp.append(comp)
            Gui.addElToList(list, str(comp))
            Gui.destroyTop(top)
        else:
            Gui.alertErr("Error", "Fill obligatory fields", top)

    def checkConf(self, e, l):
        """Validate all configuration entries *e* and write the configuration
        to the file named in e[6], with single quotes converted to double
        quotes for JSON compatibility."""
        from gui import Gui
        if len(e[0].get()) == 0 or len(e[1].get()) == 0 or len(e[2].get()) == 0 or len(e[3].get()) == 0:
            Gui.alertErr("Error", "Fill obligatory fields")
            return
        if len(self.listOfComp) == 0:
            Gui.alertErr("Error", "Insert at least a compilation test")
            return
        if len(e[6].get()) == 0:
            Gui.alertErr("Error", "Insert configuration file name")
            return
        self.conf.setTemplatePath(e[0].get())
        self.conf.setPayloadPath(e[1].get())
        self.conf.setSpecialChar(e[2].get())
        self.conf.setPlaceholderPayload(e[3].get())
        if len(e[4].get()) != 0:
            try:
                rate = float(e[4].get())
                # NOTE(review): message says (0,1] but the check accepts
                # [0,1) — confirm which range is intended
                if not(rate >= 0 and rate < 1):
                    Gui.alertErr("Error", "The rate must be a decimal between (0,1]")
                    return
                self.conf.setFreq(rate)
            except ValueError:
                Gui.alertErr("Error", "The rate must be a decimal between (0,1]")
                return
        if len(e[5].get()) != 0:
            self.conf.setOut(e[5].get())
        # substitutions list is handled at runtime
        print(str(self.conf))
        with open(e[6].get(), "w") as f:
            tmp = str(self.conf)
            tmp = tmp.replace("\'", "\"")
            f.write(tmp)
        Gui.alertErr("Info", "File saved : {}".format(e[6].get()))

    def updateComp(self, top, e, list, index):
        """Replace the compiler entry at *index* with the edited values and
        buffered options, refreshing the listbox row in place."""
        comp = Comp()
        if len(e[0].get()) != 0 and len(e[1].get()) != 0:
            from gui import Gui
            comp.setName(e[0].get())
            comp.setPath(e[1].get())
            for t in self.tmpOpt1:
                comp.addOpt1(t)
            for t in self.tmpOpt2:
                comp.addOpt2(t)
            self.tmpOpt1 = []
            self.tmpOpt2 = []
            self.listOfComp[index] = comp
            Gui.remFromList(list, index)
            Gui.addElToList(list, str(comp), index)
            Gui.destroyTop(top)
class Evaluator:
    """Trains and evaluates a question-answering model on the
    stack_over_flow_python dataset (Top-1 precision and MRR metrics)."""

    def __init__(self, conf_json, model, optimizer=None, name=None):
        """Build and compile *model* from *conf_json* and load all datasets.

        Exits the process (code 1) if the STACK_OVER_FLOW_QA environment
        variable — the dataset root — is not set.
        """
        try:
            data_path = os.environ['STACK_OVER_FLOW_QA']
        except KeyError:
            print(
                "STACK_OVER_FLOW_QA is not set. Set it to your clone of https://github.com/mrezende/stack_over_flow_python"
            )
            sys.exit(1)
        self.conf = Conf(conf_json)
        self.model = model(self.conf)
        if name is None:
            self.name = self.conf.name() + '_' + model.__name__
            logger.info(f'Initializing Evaluator ...')
            logger.info(f'Name: {self.name}')
        else:
            self.name = name
        self.path = data_path
        self.params = self.conf.training_params()
        # explicit optimizer argument wins over the configured one
        optimizer = self.params['optimizer'] if optimizer is None else optimizer
        self.model.compile(optimizer)
        self.answers = self.load('answers.json')  # self.load('generated')
        self.training_data = self.load('training.json')
        self.dev_data = self.load('dev.json')
        self.eval_data = self.load('eval.json')
        # lazily-built caches (see vocab()/reverse_vocab())
        self._vocab = None
        self._reverse_vocab = None
        self._eval_sets = None
        # per-run score accumulators, joined in save_score()
        self.top1_ls = []
        self.mrr_ls = []

    ##### Resources #####

    def save_conf(self):
        """Persist the current configuration via the Conf object."""
        self.conf.save_conf()

    def load(self, name):
        """Load JSON file *name* from the dataset root."""
        return json.load(open(os.path.join(self.path, name), 'r'))

    def vocab(self):
        """index -> lower-cased word mapping (inverse of reverse_vocab), cached."""
        if self._vocab is None:
            reverse_vocab = self.reverse_vocab()
            self._vocab = dict(
                (v, k.lower()) for k, v in reverse_vocab.items())
        return self._vocab

    def reverse_vocab(self):
        """word -> index mapping fitted on samples_for_tokenizer.json, cached."""
        if self._reverse_vocab is None:
            samples = self.load('samples_for_tokenizer.json')
            tokenizer = Tokenizer()
            tokenizer.fit_on_texts(samples)
            self._reverse_vocab = tokenizer.word_index
        return self._reverse_vocab

    ##### Loading / saving #####

    def save_epoch(self, name=None):
        """Save model weights to models/weights_epoch_<suffix>.h5."""
        if not os.path.exists('models/'):
            os.makedirs('models/')
        suffix = self.name if name is None else name
        logger.info(f'Saving weights: models/weights_epoch_{suffix}.h5')
        self.model.save_weights(f'models/weights_epoch_{suffix}.h5',
                                overwrite=True)

    def load_epoch(self, name=None):
        """Load model weights saved by save_epoch(); asserts the file exists."""
        suffix = self.name if name is None else name
        assert os.path.exists(f'models/weights_epoch_{suffix}.h5'
                              ), f'Weights at epoch {suffix} not found'
        logger.info(f'Loading weights: models/weights_epoch_{suffix}.h5')
        self.model.load_weights(f'models/weights_epoch_{suffix}.h5')

    ##### Converting / reverting #####

    def convert(self, words):
        """Map *words* (string or token list) to vocabulary indices (0 = OOV)."""
        rvocab = self.reverse_vocab()
        if type(words) == str:
            words = words.strip().lower().split(' ')
        return [rvocab.get(w, 0) for w in words]

    def revert(self, indices):
        """Map vocabulary *indices* back to words ('X' for unknown indices)."""
        vocab = self.vocab()
        return [vocab.get(i, 'X') for i in indices]

    ##### Padding #####

    def padq(self, data):
        """Pad/truncate question sequences to the configured question length."""
        return self.pad(data, self.conf.question_len())

    def pada(self, data):
        """Pad/truncate answer sequences to the configured answer length."""
        return self.pad(data, self.conf.answer_len())

    def pad(self, data, len=None):
        """Post-pad/post-truncate *data* to maxlen *len* with zeros.

        NOTE(review): the parameter ``len`` shadows the builtin (kept as-is).
        """
        from keras.preprocessing.sequence import pad_sequences
        return pad_sequences(data,
                             maxlen=len,
                             padding='post',
                             truncating='post',
                             value=0)

    ##### Training #####

    def get_time(self):
        """Current UTC time as 'YYYY-MM-DD HH:MM:SS'."""
        return strftime('%Y-%m-%d %H:%M:%S', gmtime())

    def train_and_evaluate(self, mode='train'):
        """Either train on the training set or run 20 shuffled evaluations
        and save a histogram of the answer-rank positions."""
        val_losses = []
        if mode == 'train':
            val_loss = self.train(self.training_data)
            val_losses.append(val_loss)
            logger.info(f'Val loss: {val_loss}')
        elif mode == 'evaluate':
            results = {'top1': [], 'mrr': [], 'positions': []}
            logger.info('Evaluating...')
            for i in range(0, 20):
                top1, mrr, positions = self.evaluate(shuffle=True)
                results['top1'].append(top1)
                results['mrr'].append(mrr)
                results['positions'].append(positions)
                logger.info(
                    f'Iteration: {i}: Top-1 Precision {top1}, MRR {mrr}, Positions: {positions}'
                )
            df = pd.DataFrame(results)
            top1_desc = df.describe()['top1']
            mrr_desc = df.describe()['mrr']
            # save histogram plot
            report = ReportResult(
                {'positions': np.append([], results['positions'])},
                index=[
                    i for i in range(
                        1, len(np.append([], results['positions'])) + 1)
                ],
                plot_name=f'histogram_{self.name}')
            report.generate_histogram()
            report.save_plot()
            logger.info(f'Top1 Description: {top1_desc}')
            logger.info(f'MRR Description: {mrr_desc}')

    def evaluate(self, X=None, name=None, shuffle=False):
        """Load the weights for *name* and score dataset *X* (eval set by
        default); returns (top1, mrr, positions)."""
        self.load_epoch(name)
        data = self.eval_data if X is None else X
        top1, mrr, positions = self.get_score(data, verbose=True,
                                              shuffle=shuffle)
        return top1, mrr, positions

    def train(self, X):
        """Train on dataset *X* epoch by epoch, tracking the best MRR on the
        dev set and saving the best weights; returns the last val_loss."""
        batch_size = self.params['batch_size']
        validation_split = self.params['validation_split']
        nb_epoch = self.params['nb_epoch']
        # top_50 = self.load('top_50')
        questions = list()
        good_answers = list()
        # flatten: each question is repeated once per good answer
        for j, q in enumerate(X):
            questions += [q['question']] * len(q['good_answers'])
            good_answers += q['good_answers']
        logger.info('Began training at %s on %d samples' %
                    (self.get_time(), len(questions)))
        questions = self.padq(questions)
        good_answers = self.pada(good_answers)
        # According to NN Design Book:
        # For this reason it is best to try several different initial guesses in order to ensure that
        # a global minimum has been obtained.
        best_top1_mrr = {'top1': 0, 'mrr': 0}
        hist_losses = {'val_loss': [], 'loss': []}
        for i in range(1, nb_epoch + 1):
            # negative sampling: random answers as the "bad" triplet member
            bad_answers = self.pada(
                random.sample(self.answers, len(good_answers)))
            logger.info(f'Fitting epoch {i}')
            hist = self.model.fit([questions, good_answers, bad_answers],
                                  epochs=1,
                                  batch_size=batch_size,
                                  validation_split=validation_split,
                                  verbose=1)
            val_loss = hist.history['val_loss'][0]
            loss = hist.history['loss'][0]
            hist_losses['val_loss'].append(val_loss)
            hist_losses['loss'].append(loss)
            # temporary weights from last training
            self.save_epoch('aux')
            # check MRR
            top1, mrr, positions = self.evaluate(self.dev_data, 'aux')
            if mrr > best_top1_mrr['mrr']:
                best_top1_mrr['top1'] = top1
                best_top1_mrr['mrr'] = mrr
                logger.info(
                    f'Epoch {i} Loss = {loss}, Validation Loss = {val_loss} '
                    + f'(Best: TOP1 = {top1}, MRR = {mrr})')
                # saving weights
                self.save_epoch()
            # Article: "Summarizing Source Code using a Neural Attention Model"
            # terminate training when the learning rate goes
            # below 0.001.
            if loss < 0.001:
                break
        # save plot val_loss, loss
        report = ReportResult(
            hist_losses,
            [i for i in range(1, len(hist_losses['loss']) + 1)], self.name)
        plot = report.generate_line_report()
        report.save_plot()
        logger.info(f'saving loss, val_loss plot')
        # save conf
        self.save_conf()
        clear_session()
        return val_loss

    def get_score(self, X, verbose=False, shuffle=False):
        """Score the model on dataset *X*.

        For each item, ranks the good answers against the bad ones (or a
        fixed random sample of 49 when *shuffle*); returns
        (top1 precision, MRR, list of rank positions).
        """
        c_1, c_2 = 0, 0
        random_bad_answers = random.sample(self.answers, 49)
        logger.info(f'len X: {len(X)}')
        positions = []
        for i, d in enumerate(X):
            bad_answers = d[
                'bad_answers'] if shuffle is False else random_bad_answers
            answers = d['good_answers'] + bad_answers
            answers = self.pada(answers)
            question = self.padq([d['question']] * len(answers))
            sims = self.model.predict([question, answers])
            n_good = len(d['good_answers'])
            # best overall vs. best among the good answers
            max_r = np.argmax(sims)
            max_n = np.argmax(sims[:n_good])
            r = rankdata(sims, method='max')
            if verbose:
                min_r = np.argmin(sims)
                amin_r = answers[min_r]
                amax_r = answers[max_r]
                amax_n = answers[max_n]
                logger.info(' ----- begin question ----- ')
                logger.info(' '.join(self.revert(d['question'])))
                logger.info('Predicted: ({}) '.format(sims[max_r]) +
                            ' '.join(self.revert(amax_r)))
                logger.info(
                    'Expected: ({}) Rank = {} '.format(sims[max_n], r[max_n])
                    + ' '.join(self.revert(amax_n)))
                logger.info('Worst: ({})'.format(sims[min_r]) +
                            ' '.join(self.revert(amin_r)))
                logger.info(' ----- end question ----- ')
            c_1 += 1 if max_r == max_n else 0
            position = r[max_r] - r[max_n] + 1
            c_2 += 1 / float(position)
            positions.append(position)
        top1 = c_1 / float(len(X))
        mrr = c_2 / float(len(X))
        print('Top-1 Precision: %f' % top1)
        print('MRR: %f' % mrr)
        return top1, mrr, positions

    def save_score(self):
        """Append this run's scores to results_conf.txt.

        NOTE(review): joins top1_ls/mrr_ls with ','.join — assumes their
        elements are already strings; confirm against the code that fills them.
        """
        with open('results_conf.txt', 'a+') as append_file:
            conf_json, name = self.conf.conf_json_and_name()
            top1_precisions = ','.join(self.top1_ls)
            mrrs = ','.join(self.mrr_ls)
            append_file.write(
                f'{name}; {conf_json}; top-1 precision: {top1_precisions}; MRR: {mrrs}\n'
            )
def __init__(self):
    """Load the general config, build the report model and start reporting."""
    conf = Conf("general")
    self.general_conf = conf
    self.init_model()
    self.start()
    self.num_of_suites_to_ignore = conf.get_int("num.of.suites.to.ignore")
class AbstractReport(object):
    """Base Robot-Framework listener that maps suites/tests/log messages onto
    a Difido execution model; subclasses implement the write_* hooks."""

    # strftime formats: internal report time/date, and Robot's timestamp format
    TIME_FORMAT = '%H:%M:%S:'
    DATE_FORMAT = '%Y/%m/%d'
    ROBOT_FORMAT = '%Y%m%d %H:%M:%S'

    def __init__(self):
        self.general_conf = Conf("general")
        self.init_model()
        self.start()
        self.num_of_suites_to_ignore = self.general_conf.get_int(
            "num.of.suites.to.ignore")

    def init_model(self):
        """Create the execution/machine model and reset per-run state."""
        self.execution = Execution()
        machine = Machine(socket.gethostname())
        machine.planned_tests = self.general_conf.get_int("planned.tests")
        self.execution.add_machine(machine)
        # pseudo-unique run id: random 4-digit + timestamp, dots stripped
        self.uid = str(randint(1000, 9999) + time.time() / 1000).replace(
            ".", "")
        self.index = 0
        self.scenario_stack = []
        # elements logged before any test starts are buffered here
        self.buffered_elements = []
        self.testDetails = None

    def start_suite(self, name, attr):
        """Open a scenario for suite *name*, skipping the configured number of
        outermost suites."""
        self.execution.get_last_machine().planned_tests = 0
        if self.num_of_suites_to_ignore > 0:
            self.num_of_suites_to_ignore -= 1
            return
        # NOTE(review): counter also decremented (going negative) on
        # non-ignored suites; end_suite increments it back — confirm intent
        self.num_of_suites_to_ignore -= 1
        self.scenario = Scenario(name)
        self.scenario.scenarioProperties = self.get_additional_execution_properties(
        )
        if len(self.scenario_stack) != 0:
            self.scenario_stack[-1].add_child(self.scenario)
        else:
            self.execution.get_last_machine().add_child(self.scenario)
        self.scenario_stack.append(self.scenario)
        self.write_execution()

    def end_suite(self, name, attrs):
        """Close the current scenario (mirrors start_suite's ignore logic)."""
        self.num_of_suites_to_ignore += 1
        if self.num_of_suites_to_ignore > 0:
            return
        if len(self.scenario_stack) != 0:
            self.scenario_stack.pop()
        self.testDetails = None

    def end_test(self, name, attrs):
        """Record status/duration of the finished test and flush."""
        if len(self.scenario_stack) == 0:
            return
        self.test.set_status(attrs["status"])
        self.test.duration = attrs["elapsedtime"]
        self.write_test_details()
        self.write_execution()

    def start_test(self, name, attrs):
        """Create the Test/TestDetails pair for *name* and attach it to the
        current scenario; drains any buffered log elements."""
        if len(self.scenario_stack) == 0:
            return
        self.test_start_time = datetime.strptime(attrs['starttime'],
                                                 AbstractReport.ROBOT_FORMAT)
        self.test = Test(self.index, name, self.uid + "_" + str(self.index),
                         attrs['className'])
        self.testDetails = TestDetails(self.uid + "_" + str(self.index))
        self.index += 1
        # NOTE(review): attribute name 'timstamp' (sic) kept — presumably
        # what the Difido model expects; confirm before renaming
        self.test.timstamp = self.test_start_time.strftime(
            AbstractReport.TIME_FORMAT)
        self.test.date = self.test_start_time.strftime(
            AbstractReport.DATE_FORMAT)
        self.test.description = attrs['doc']
        self.scenario.add_child(self.test)
        if len(self.buffered_elements) > 0:
            for element in self.buffered_elements:
                self.testDetails.add_element(element)
            self.buffered_elements = []
        self.write_execution()
        self.write_test_details()

    def log_message(self, message):
        """Convert a Robot log *message* into a report element; a FAILURE
        level message also marks the current test as failed."""
        if len(self.scenario_stack) == 0:
            return
        element = ReportElement()
        timestamp = datetime.strptime(message['timestamp'],
                                      AbstractReport.ROBOT_FORMAT)
        element.time = timestamp.strftime(AbstractReport.TIME_FORMAT)
        element.title = message['message']
        if message["level"] == ReportElementStatus.FAILURE:
            element.set_status("failure")
            if self.test is not None:
                self.test.set_status("failure")
        self.add_report_element(element)

    def message(self, message):
        pass

    def start(self):
        pass

    def close(self):
        """Final flush at the end of the run."""
        if self.testDetails is not None:
            self.write_test_details()
        self.write_execution()

    def write_test_details(self):
        # hook for subclasses
        pass

    def write_execution(self):
        # hook for subclasses
        pass

    def get_additional_execution_properties(self):
        '''
        return dictionary that will be added as properties to the execution
        '''
        return {}

    def add_report_element(self, element):
        """Attach *element* to the current test details, or buffer it until a
        test starts."""
        if self.testDetails is None:
            self.buffered_elements.append(element)
        else:
            self.testDetails.add_element(element)
            self.write_test_details()

    def add_test_property(self, key, value):
        """Set a key/value property on the current test and flush."""
        self.test.add_property(key, value)
        self.write_test_details()

    def add_exeution_property(self, key, value):
        # NOTE(review): method name typo ('exeution') kept — callers may
        # depend on it; hook for subclasses
        pass
'''
Created on Aug 10, 2017

@author: Itai Agmon
'''
import json

from configuration import Conf
from http import client

import requests

conf = Conf("remote")


def _api_base():
    """Base URL of the remote report server's API, built once per call from
    the "remote" configuration (host and port)."""
    return ("http://" + conf.get_string("host") + ":" +
            str(conf.get_int("port")) + "/api")


def prepare_remote_execution(details):
    """Create a new execution on the server; returns its id as a string."""
    res = send_request(method="POST",
                       url=_api_base() + "/executions",
                       data=to_content(details),
                       headers={"Content-Type": "application/json"})
    return res.content.decode()


def add_machine(execution_id, machine):
    """Register *machine* under *execution_id*; returns the machine id."""
    res = send_request("POST",
                       _api_base() + "/executions/" + execution_id + "/machines/",
                       to_content(machine),
                       {"Content-Type": "application/json"})
    return res.content.decode()


def update_machine(execution_id, machine_id, machine):
    """Overwrite the stored state of an already-registered machine."""
    send_request("PUT",
                 _api_base() + "/executions/{0}/machines/{1}".format(
                     str(execution_id), str(machine_id)),
                 to_content(machine),
                 {"Content-Type": "application/json"})


def add_test_details(execution_id, test_details):
    """Post per-test detail data for the given execution."""
    send_request("POST",
                 _api_base() + "/executions/{0}/details".format(
                     str(execution_id)),
                 to_content(test_details),
                 {"Content-Type": "application/json"})
class ConfigCreator():
    """Builds a Conf from web-form inputs and writes it to a per-upload
    JSON config file."""

    def __init__(self):
        self.conf = Conf()
        self.listOfComp = self.conf.getComp()
        self.listOfSub = self.conf.getSub()
        # scratch buffers for the option pairs of the Comp being built
        self.tmpOpt1 = []
        self.tmpOpt2 = []

    def create(self, payload_file, template_file, special_char,
               payload_placeholder, rate, outputname, subs1, string1,
               compilers, option1_gcc, option2_gcc, option1_gplusplus,
               option2_gplusplus, value1_gcc, value2_gcc, value1_gplusplus,
               value2_gplusplus, hexed_filename):
        """Populate the Conf from the form fields, build one Comp per entry in
        *compilers* (gcc / g++ option sets), and write the resulting config to
        ``<UPLOAD_FOLDER>/<hexed_filename>/config/<hexed_filename>.conf.json``.
        Returns the config file name.
        """
        # [option, value] pairs per compiler
        ops1_gcc = []
        ops2_gcc = []
        ops1_gplusplus = []
        ops2_gplusplus = []
        ops1_gcc.extend([option1_gcc, value1_gcc])
        ops1_gplusplus.extend([option1_gplusplus, value1_gplusplus])
        ops2_gcc.extend([option2_gcc, value2_gcc])
        ops2_gplusplus.extend([option2_gplusplus, value2_gplusplus])
        config_filename = hexed_filename + ".conf.json"
        path = os.path.join(app.config['UPLOAD_FOLDER'], hexed_filename,
                            "config", config_filename)
        self.conf.setTemplatePath(template_file)
        self.conf.setPayloadPath(payload_file)
        self.conf.setSpecialChar(special_char)
        self.conf.setPlaceholderPayload(payload_placeholder)
        rate = float(rate)
        self.conf.setFreq(rate)
        self.conf.setOut(outputname)
        for subs, string in zip(subs1, string1):
            self.conf.addToSub((subs, string))
        comps = []
        for compiler in compilers:
            comp = Comp()
            comp.setName(compiler)
            comp.setPath("edit")
            if compiler == "gcc":
                self.tmpOpt1.append(ops1_gcc)
                self.tmpOpt2.append(ops2_gcc)
            elif compiler == "g++":
                # NOTE(review): g++ branch clears the buffers first — looks
                # redundant if they are also reset below; confirm intent
                self.tmpOpt1 = []
                self.tmpOpt2 = []
                self.tmpOpt1.append(ops1_gplusplus)
                self.tmpOpt2.append(ops2_gplusplus)
            for t in self.tmpOpt1:
                comp.addOpt1(t)
            for t in self.tmpOpt2:
                comp.addOpt2(t)
            comps.append(comp)
            # reset buffers so options do not leak into the next compiler
            self.tmpOpt1 = []
            self.tmpOpt2 = []
        for comp in comps:
            self.listOfComp.append(comp)
        with open(path, "w") as f:
            # Conf's str() uses single quotes; convert for JSON compatibility
            tmp = str(self.conf)
            tmp = tmp.replace("\'", "\"")
            f.write(tmp)
        return config_filename
def split_data(args):
    """Split raw observation/flag CSV exports into per-patient CSV files.

    Reads the raw export under ``conf.raw_data``, writes per-patient
    observation files and flag files under ``conf.csv_data/<data_type>/``,
    and finally a merged.csv joining environmental observations with the
    flags raised on the same day.

    :param args: namespace with at least ``data_type`` ('env' or 'clinical')
    """
    data_type = args.data_type
    conf = Conf(args)
    base_path = conf.raw_data
    save_path = conf.csv_data
    save_mkdir(save_path + '/' + data_type + '/data/')
    save_mkdir(save_path + '/' + data_type + '/flag/')
    patients = pd.read_csv(base_path + '/Patients.csv')
    ids = patients['subjectId']
    id_index = patients['sabpId']
    # NOTE(review): 'a' is computed but never used — dead code kept as-is
    a = id_index[ids == ids[0]]
    # code -> display-name lookup tables from the export's dimension files
    observ_types = pd.read_csv(base_path + '/Observation-type.csv')
    types = {}
    for i in range(0, len(observ_types)):
        types[observ_types.loc[i]['code']] = observ_types.loc[i]['display']
    observ_devices = pd.read_csv(base_path + '/Observation-device.csv')
    devices = {}
    for i in range(0, len(observ_devices)):
        devices[observ_devices.loc[i]['code']] = observ_devices.loc[i]['display']
    observ_locs = pd.read_csv(base_path + '/Observation-location.csv')
    locs = {}
    for i in range(0, len(observ_locs)):
        locs[observ_locs.loc[i]['code']] = observ_locs.loc[i]['display']
    data = pd.read_csv(base_path + '/observations.csv')
    # 408746007 is the device code that marks environmental sensors
    if data_type == 'env':
        data = data.loc[data['device'] == 408746007]  # env
    elif data_type == 'clinical':
        data = data.loc[data['device'] != 408746007]  # Clinical
    data['datetimeObserved'] = pd.to_datetime(data['datetimeObserved'])
    data_new = pd.DataFrame(columns=['subject', 'datetimeObserved', 'type',
                                     'location', 'value'])
    data_new['subject'] = data['subject']
    data_new['datetimeObserved'] = data['datetimeObserved']
    data_new['type'] = data['type'].map(types)
    data_new['location'] = data['location'].map(locs)
    if data_type == 'env':
        # environmental readings are booleans; encode as 0/1
        data_new['value'] = data['valueBoolean']
        data_new = data_new.loc[data_new['type'].isin(
            ['Movement', 'Door', 'Does turn on domestic appliance', 'Light'])]
        bools = {True: 1, False: 0}
        data_new['value'] = data_new['value'].map(bools)
    elif data_type == 'clinical':
        data_new['value'] = data['valueQuantity']
        data_new = data_new[data_new.value.notna()]
    # one observation file per patient
    for i in range(0, len(ids)):
        idx = ids[i]
        name_data = str(id_index[idx == ids][i]) + "_observation.csv"
        if data_type == 'env':
            d = data_new.loc[data_new['subject'] == idx,
                             ['datetimeObserved', 'location', 'value']]
        elif data_type == 'clinical':
            d = data_new.loc[data_new['subject'] == idx,
                             ['datetimeObserved', 'type', 'value']]
        d.to_csv(save_path + '/' + data_type + '/data/' + name_data)
    env_data = deepcopy(data_new)
    #if data_type == 'clinical':
    #    return
    # flag (incident) dimension tables
    d = pd.read_csv(base_path + '/Flag-category.csv')
    flag_types = {}
    for i in range(0, len(d)):
        flag_types[d.loc[i]['code']] = d.loc[i]['display']
    d = pd.read_csv(base_path + '/Flag-type.csv')
    flag_elements = {}
    for i in range(0, len(d)):
        flag_elements[d.loc[i]['code']] = d.loc[i]['display']
    data = pd.read_csv(base_path + '/Flags.csv')
    data['datetimeRaised'] = pd.to_datetime(data['datetimeRaised'])
    data_new = pd.DataFrame(columns=['flagId', 'subject', 'datetimeObserved',
                                     'element', 'type'])
    data_new['subject'] = data['subject']
    data_new['datetimeObserved'] = data['datetimeRaised']
    data_new['type'] = data['category'].map(flag_types)
    data_new['element'] = data['type'].map(flag_elements)
    data_new['flagId'] = data['flagId']
    # join in whether each flag was later validated
    d = pd.read_csv(base_path + '/FlagValidations.csv')
    val_df = pd.DataFrame(columns=['flagId', 'valid'])
    val_df['flagId'] = d['flag']
    val_df['valid'] = d['valid']
    data_new = pd.merge(data_new, val_df, on='flagId')
    flag_data = deepcopy(data_new)
    # one flags file per patient
    for i in range(0, len(ids)):
        idx = ids[i]
        name_data = str(id_index[idx == ids][i]) + "_flags.csv"
        d = data_new.loc[data_new['subject'] == idx,
                         ['datetimeObserved', 'element', 'type', 'valid']]
        d.to_csv(save_path + '/' + data_type + "/flag/" + name_data)
    # merge: keep env readings from days on which a flag was raised,
    # annotated with that day's flag element/type/validity
    summation = []
    for i in range(0, len(ids)):
        idx = ids[i]
        name_data = str(id_index[idx == ids][i]) + "obs_flag.csv"
        f_data = flag_data.loc[flag_data['subject'] == idx,
                               ['datetimeObserved', 'element', 'type', 'valid']]
        e_data = env_data.loc[env_data['subject'] == idx,
                              ['datetimeObserved', 'location', 'value']]
        f_data['datetimeObserved'] = f_data['datetimeObserved'].dt.date
        e_data['date'] = e_data['datetimeObserved'].dt.date
        e_data = e_data.loc[e_data['date'].isin(f_data['datetimeObserved'])]
        e_data['Patient id'] = int(id_index[idx == ids][i])
        e_data['element'] = None
        e_data['type'] = None
        e_data['valid'] = None
        # NOTE(review): if several flags share a date, only the first one's
        # values are used (.values[0]) — confirm that is intended
        for sub_date in f_data['datetimeObserved']:
            e_data.loc[e_data['date'] == sub_date, 'element'] = f_data.loc[
                f_data['datetimeObserved'] == sub_date]['element'].values[0]
            e_data.loc[e_data['date'] == sub_date, 'type'] = f_data.loc[
                f_data['datetimeObserved'] == sub_date]['type'].values[0]
            e_data.loc[e_data['date'] == sub_date, 'valid'] = f_data.loc[
                f_data['datetimeObserved'] == sub_date]['valid'].values[0]
        summation.append(e_data)
        # e_data.to_csv(save_path + '/' + data_type + "/all_in_one/" + name_data)
    summation = pd.concat(summation)
    summation.to_csv(save_path + '/' + data_type + "/merged.csv")