def add_track(neuros, sourcename, targetname, metadata, recording=None):
    if sourcename is not None:
        util.copy_file(sourcename, targetname)
    # Create DB entry
    destination = neuros.hostpath_to_neurospath(targetname)
    record = (metadata["title"], None, metadata["artist"],
              metadata["album"], metadata["genre"], recording,
              metadata["length"], metadata["size"] // 1024, destination)
    # Add entry to database
    neuros.db["audio"].add_record(record)
    try:
        filename = path.join(*neuros.mountpoint_parts + [neuros.DB_DIR, 'tracks.txt'])
        tracknum_file = open(filename, 'a')
        tracknum = metadata.get('tracknumber', None)
        if tracknum is None:
            tracknum = metadata.get('title', targetname)
        try:
            # Normalize string identifiers; non-strings (e.g. int track
            # numbers) pass through unchanged.
            tracknum = tracknum.lower()
        except AttributeError:
            pass
        tracknum_file.write('%s\t%s\n' % (destination, tracknum))
        tracknum_file.close()
    except IOError:
        pass  # Fail silently if we can't write the file.
def create_profile(auth_info, **kwargs):
    if auth_info['code'] == 1:
        return json.dumps(auth_info)
    username = auth_info['username']
    try:
        data = request.get_json()['params']
        # ast.literal_eval parses the partition layout without the code
        # execution risk of eval() on request data.
        para = ast.literal_eval(str(data["partion"]))
        name = data['profile']
        filename = str(name)
        util.copy_file(filename)
        util.write_file(filename, para)
        util.replace_url(filename, str(data['url']))
        ret = profile_create(app.config['cobbler_url'],
                             app.config['cobbler_user'],
                             app.config['cobbler_password'],
                             filename, str(data['distro']),
                             '/var/lib/cobbler/kickstarts/%s' % filename)
        if str(ret['result']) == "True":
            row = {"distro": str(data['distro']),
                   "os": filename,
                   "ks": '/var/lib/cobbler/kickstarts/%s' % filename}
            app.config['cursor'].execute_insert_sql('profile', row)
            util.write_log('api').info(username, "create cobbler profile %s success" % filename)
            return json.dumps({'code': 0, 'result': 'create %s success' % filename})
        util.write_log('api').info(username, "create cobbler profile %s failed" % filename)
        return json.dumps({'code': 1, 'errmsg': 'create %s failed' % filename})
    except Exception:
        util.write_log('api').error('create cobbler error:%s' % traceback.format_exc())
        return json.dumps({'code': 1, 'errmsg': 'create cobbler failed'})
def queryDataByImgId(img_id):
    with open('/home/nvidia/Horus/config.cnf') as json_data:
        cnf = json.load(json_data)
    db = create_engine(cnf['db'])
    result = db.execute(
        text('select * from tb_object where img_id = :img_id'),
        {'img_id': img_id})
    data = result.fetchall()
    if len(data) > 0:
        img = ARC_PATH + str(img_id) + '.jpg'
        if os.path.exists(img):
            imgs = DETECT_PATH + str(img_id) + ".jpg"
            util.copy_file(img, imgs)
            for s in data:
                if len(s[1]) > 0:
                    if os.path.exists(imgs):
                        im = load_image(imgs, 0, 0)
                        drawbox(im, img_id,
                                round(float(s[3])), round(float(s[4])),
                                round(float(s[5])), round(float(s[6])), 3)
def convert_music():
    logging.info("convert music....")
    raw_music_dir = os.path.join(config.RAW_RESOURCE_PATH, "common", "music")
    for item in os.listdir(raw_music_dir):
        if item == "." or item == ".." or (not item.endswith(".mp3")):
            continue
        raw_music_path = os.path.join(raw_music_dir, item)
        bin_music_path = os.path.join(config.BIN_RESOURCE_PATH, "music", item)
        util.copy_file(raw_music_path, bin_music_path)
def sample_files_from_directory(directory, count):
    images = filter_images_in_path(directory)
    # Never sample more files than are available.
    sample_count = min(count, len(images))
    print("Sample Count: {}".format(sample_count))
    sampled_images = random.sample(images, sample_count)
    destination_root = os.path.join(directory, "..")
    sampled_directory = os.path.join(destination_root, "sampled")
    sampled_directory = create_and_return_directory(sampled_directory)
    for image in sampled_images:
        full_path = os.path.join(directory, image)
        copy_file(full_path, sampled_directory)
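# Note: filter_images_in_path and create_and_return_directory are project
# helpers that are not shown in this snippet. A minimal sketch of what they
# might look like, assuming images are selected by file extension (the names
# and the extension set below are assumptions, not the original code):
import os

IMAGE_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.bmp')  # assumed set


def filter_images_in_path(directory):
    # Keep only regular files whose extension marks them as an image.
    return [entry for entry in os.listdir(directory)
            if os.path.isfile(os.path.join(directory, entry))
            and entry.lower().endswith(IMAGE_EXTENSIONS)]


def create_and_return_directory(path):
    # Create the directory if it does not exist yet and return its
    # normalized path, so callers can chain the result.
    os.makedirs(path, exist_ok=True)
    return os.path.normpath(path)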
def zip_align(sample_file_name):
    # Align the file
    try:
        if enable_logging:
            u.logger('Zip: ' + sample_file_name)
        # Keep a backup of the original APK, align it back into the
        # original name, then drop the backup.
        u.copy_file(sample_file_name, sample_file_name + '_old.apk')
        popen(config.zipalign_path + ' -f 8 ' +
              sample_file_name + '_old.apk' + ' ' + sample_file_name)
        os.remove(sample_file_name + '_old.apk')
    except OSError as ex:
        raise e.OpenToolException(
            str(ex) + '\nUnable to zipalign ' + sample_file_name)
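# Building the zipalign command by string concatenation breaks on paths with
# spaces and discards the exit status. A sketch of a safer variant using
# subprocess (an alternative idea, not the project's actual helper;
# zipalign_path is assumed to point at the Android zipalign binary):
import os
import shutil
import subprocess


def zip_align_checked(sample_file_name, zipalign_path):
    backup = sample_file_name + '_old.apk'
    shutil.copy2(sample_file_name, backup)  # keep the original safe
    # check=True raises CalledProcessError if zipalign exits non-zero,
    # so a failed alignment cannot pass silently.
    subprocess.run([zipalign_path, '-f', '8', backup, sample_file_name],
                   check=True)
    os.remove(backup)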
def convert_images(node, out):
    imageNodes = node.getElementsByTagName("image")
    image_count = len(imageNodes)
    out.write(struct.pack("B", image_count))
    for imageNode in imageNodes:
        image_file = imageNode.getAttribute("file")
        relative_image_file = image_file[3:]
        key = util.get_path_key(relative_image_file)
        raw_image_path = os.path.join(config.RAW_RESOURCE_PATH, config.TARGET,
                                      relative_image_file)
        bin_image_path = os.path.join(config.BIN_RESOURCE_PATH, config.TARGET,
                                      str(key))
        util.copy_file(raw_image_path, bin_image_path)
        out.write(struct.pack("I", key))
def make_executable_Haskell():
    if not util.file_exists("solution.hs"):
        raise Exception("solution.hs does not exist")
    util.del_file("work")
    util.del_file("work.hi")
    util.del_file("work.o")
    util.copy_file("solution.hs", "work.hs")
    f = open("work.hs", "a")
    print >>f, """main = do
    print "OK"
"""
    f.close()
    util.system("ghc -O3 work.hs")
    if not util.file_exists("work"):
        raise Exception("error in haskell compilation")
    util.del_file("work")
    util.del_file("work.hi")
    util.del_file("work.o")
def make_corrects_RunHaskell():
    for f in glob.glob("*.cor"):
        util.del_file(f)
    inps = sorted(glob.glob("*.inp"))
    for inp in inps:
        tst = os.path.splitext(inp)[0]
        util.copy_file("solution.hs", "work.hs")
        if util.file_exists("judge.hs"):
            os.system("cat judge.hs >> work.hs")
        f = open("work.hs", "a")
        print >>f, "main = do"
        for line in open(tst + ".inp").readlines():
            line = line.rstrip()
            if line.startswith("let "):
                print >>f, "    %s" % line
            # elif line.startswith("deb "):
            #     print >>f, '    hPutStrLn stderr "%s"' % line
            else:
                print >>f, "    print (%s)" % line
        f.close()
        util.system("runhaskell work.hs >%s.cor" % (tst,))
def perform_blackbox_test(config):
    out_dir = config["out_dir"]
    p4_input = config["p4_input"]
    if out_dir == OUT_DIR:
        out_dir = out_dir.joinpath(p4_input.stem)
    util.check_dir(out_dir)
    util.copy_file(p4_input, out_dir)
    config["out_dir"] = out_dir
    config["p4_input"] = p4_input
    main_formula, pkt_range = get_main_formula(config)
    if main_formula is None or not pkt_range:
        return util.EXIT_FAILURE
    conditions = set()
    # FIXME: Another hack to deal with branch conditions we cannot control
    for child in main_formula.children()[pkt_range]:
        conditions |= get_branch_conditions(child)
    cond_tuple = dissect_conds(config, conditions)
    stf_str = build_test(config, main_formula, cond_tuple, pkt_range)
    # finally, run the test with the stf string we have assembled
    # and return the result of course
    return run_stf_test(config, stf_str)
def get_prog_semantics(config):
    p4_input = config["p4_input"]
    out_dir = config["out_dir"]
    py_file = Path(f"{out_dir}/{p4_input.stem}.py")
    fail_dir = out_dir.joinpath("failed")
    result = run_p4_to_py(p4_input, py_file, config)
    if result.returncode != util.EXIT_SUCCESS:
        log.error("Failed to translate P4 to Python.")
        util.check_dir(fail_dir)
        with open(f"{fail_dir}/error.txt", 'w+') as err_file:
            err_file.write(result.stderr.decode("utf-8"))
        util.copy_file([p4_input, py_file], fail_dir)
        return None, result.returncode
    package, result = get_z3_formulization(py_file)
    pipe_val = package.get_pipes()
    if result != util.EXIT_SUCCESS:
        if fail_dir and result != util.EXIT_SKIPPED:
            util.check_dir(fail_dir)
            util.copy_file([p4_input, py_file], fail_dir)
        return pipe_val, result
    return pipe_val, util.EXIT_SUCCESS
def diff_files(passes, pass_dir, p4_file):
    p4_name = p4_file.stem
    # Start enumerating at 1 so each pass is diffed against its predecessor.
    for index, p4_pass in enumerate(passes[1:], 1):
        pass_before = passes[index - 1]
        pass_after = passes[index]
        diff_dir = f"{pass_dir}/{p4_name}"
        util.check_dir(diff_dir)
        diff_file = f"{diff_dir}/{p4_name}_{p4_pass}.diff"
        diff_cmd = "diff -rupP "
        diff_cmd += "--label=\"before_pass\" --label=\"after_pass\" "
        diff_cmd += f"{pass_before} {pass_after}"
        diff_cmd += f" > {diff_file}"
        log.debug("Creating a diff of the file")
        log.debug("Command: %s", diff_cmd)
        util.exec_process(diff_cmd)
        if os.stat(diff_file).st_size == 0:
            # The pass did not change anything, drop the empty diff.
            os.remove(diff_file)
        else:
            after_name = f"{diff_dir}/{p4_name}_{p4_pass}{p4_file.suffix}"
            util.copy_file(pass_after, after_name)
            og_name = f"{diff_dir}/{p4_name}_original{p4_file.suffix}"
            util.copy_file(p4_file, og_name)
    return util.EXIT_SUCCESS
def main(args):
    """The main process of train.

    :param args: an object of the arguments.
    """
    #================================================================================================
    # Check whether the command line arguments are valid or not.
    #================================================================================================
    start_time = time.time()
    # Paths to find gnuplot.
    gnuplot_exe_list = [r'"C:\Program Files\gnuplot\pgnuplot.exe"',
                        r'".\gnuplot\bin\pgnuplot.exe"',
                        "/usr/bin/gnuplot", "/usr/local/bin/gnuplot"]
    # Get the current path.
    current_path = os.path.dirname(os.path.realpath(__file__))
    # Judge whether the path contains Chinese characters or not.
    current_path_uni = unicode(current_path, "gbk")
    if check_contain_chinese(current_path_uni):
        print 'Error: the path can not contain Chinese characters.'
        return False
    file_list = args.files
    # Judge whether binary classification or multiclass classification.
    if len(file_list) == 2:
        bi_or_multi = 0
    elif len(file_list) > 2:
        bi_or_multi = 1
    else:
        print 'The number of input files must be more than 1.'
        return False
    preprocess_result = []
    for i in file_list:
        result = data_preprocess(i, const.TEMP_DIR)
        preprocess_result.append(result)
    if False in preprocess_result:
        print 'There exist some files that do not satisfy the LIBSVM format.'
        return False
    else:
        new_file_list = preprocess_result
        trans_labels(new_file_list)
    if args.v == 'i' and args.i_files is not None:
        # Reset the list so it holds only the independent test files.
        preprocess_result = []
        for i in args.i_files:
            result = data_preprocess(i, const.TEMP_INDEPENDENT_DIR)
            preprocess_result.append(result)
        if False in preprocess_result:
            print 'There exist some independent test files that do not satisfy the LIBSVM format.'
            return False
        else:
            independent_file_list = preprocess_result
    predict_params = '-q'  # optional parameters of svm_predict()
    svm_params = '-h 0 -m 1024 -q'
    # c_result = check_c_g(args.c, 'c')
    # if c_result is False:
    #     return False
    # g_result = check_c_g(args.g, 'g')
    # if g_result is False:
    #     return False
    # if type(c_result) != type(g_result):
    #     print 'Both the arguments c and g should be specified values or both of them are ranges.'
    #     return False
    if args.opt is None or args.opt == '0':
        c_range = xrange(-5, 11, 3)
        g_range = xrange(-10, 6, 3)
    elif args.opt == '1':
        c_range = xrange(-5, 11)
        g_range = xrange(-10, 6)
    if args.b == '1':
        svm_params += (' -b ' + str(args.b))
        predict_params += (' -b ' + str(args.b))
        b = args.b
    elif args.b == '0':
        b = args.b
    if args.p == 'ACC':
        metric = 0
    elif args.p == 'MCC':
        metric = 1
    elif args.p == 'AUC':
        metric = 2
    if args.m is not None:
        model_file_name = args.m
    else:
        print 'Error: the name of the model can not be omitted.'
        print 'A value should be given to the parameter -m.'
        return False
    cpu_core = mul.cpu_count()
    if args.cpu is None:
        process_num = cpu_core
    elif 0 < args.cpu <= cpu_core:
        process_num = args.cpu
    elif args.cpu < 0 or args.cpu > cpu_core:
        process_num = cpu_core
        print 'Warning: The value of -cpu should be larger than 0'
        print 'and less than or equal to the number of cpu cores in your computer.'
        print 'The value has been set as the default (number of all cpu cores in your computer).'
        time.sleep(2)
    if args.v == 'i' and args.i_files is None:
        print 'At least one independent dataset file should be included.'
        return False
    #================================================================================================
    # Args check finished here.
    #================================================================================================
    #================================================================================================
    # Parameter selection starts.
    #================================================================================================
    print 'Parameter selection is in processing...\n'
    results = param_selection(new_file_list, metric, svm_params, process_num,
                              c_range, g_range, bi_or_multi)
    print 'Parameter selection completed.\n'
    c = results[0][0]
    g = results[0][1]
    print 'The optimal parameters for the dataset are: C = ', 2 ** c, ' gamma = ', 2 ** g
    print '\n'
    if args.v is None:
        print 'The performance evaluations for the optimal parameter(s) are as follows:\n'
        if bi_or_multi == 0:
            print 'ACC = %.4f' % results[1][1][0]
            print 'MCC = %.4f' % results[1][1][1]
            print 'AUC = %.4f' % results[1][1][2]
            print 'Sn = %.4f' % results[1][1][3]
            print 'Sp = %.4f\n' % results[1][1][4]
        elif bi_or_multi == 1:
            print 'ACC = %.4f' % results[1]
    #================================================================================================
    # Parameter selection finished.
    #================================================================================================
    # elif type(c_result) == int:
    #     c = c_result
    #     g = g_result
    c_cost = 2 ** c
    g_gamma = 2 ** g
    svm_params += (' -c ' + str(c_cost) + ' -g ' + str(g_gamma))
    y_all = []
    x_all = []
    for file in new_file_list:
        y, x = svm_read_problem(file)
        y_all.extend(y)
        x_all.extend(x)
    dataset_size = len(x_all)
    pkl_y = current_path + const.TEMP_DIR.lstrip('.') + 'dataset_y.pkl'
    pkl_x = current_path + const.TEMP_DIR.lstrip('.') + 'dataset_x.pkl'
    cPickle.dump(y_all, open(pkl_y, 'wb'))
    cPickle.dump(x_all, open(pkl_x, 'wb'))
    #================================================================================================
    # Model training & cross validation.
    #================================================================================================
    print 'Model training is in processing...'
    final_model_file = current_path + const.FINAL_RESULTS_PATH + model_file_name
    middle_model_file = current_path + const.FINAL_RESULTS_PATH + 'middle.model'
    # Jackknife cross validation.
    if args.v == 'j':
        cross_validation(y_all, x_all, dataset_size, svm_params, predict_params, bi_or_multi)
    # k-fold cross validation.
    elif args.v is not None and args.v.isdigit() and int(args.v) > 1:
        fold = int(args.v)
        cross_validation(y_all, x_all, fold, svm_params, predict_params, bi_or_multi)
    y_all = cPickle.load(open(pkl_y, 'rb'))
    x_all = cPickle.load(open(pkl_x, 'rb'))
    final_model = svm_train(y_all, x_all, svm_params)
    svm_save_model(middle_model_file, final_model)
    #================================================================================================
    # Add the parameters to the SVM model file.
    #================================================================================================
    middle_list = []
    with open(middle_model_file) as f:
        for i in f:
            middle_list.append(i)
    param_line = ('c,' + str(c) + ',g,' + str(g) + ',b,' + str(b) +
                  ',bi_or_multi,' + str(bi_or_multi))
    with open(final_model_file, 'w') as f:
        f.write(param_line)
        f.write('\n')
        for i in middle_list:
            f.write(i)
    print 'Model training completed.'
    print 'The model has been saved. You can check it here:'
    if sys.platform.startswith('win'):
        print final_model_file.replace('/', '\\'), '\n'
    else:
        print final_model_file.replace('\\', '/'), '\n'
    if os.path.isfile('cross_validation.png'):
        try:
            os.remove('cross_validation.png')
        except OSError:
            time.sleep(0.1)
            try:
                os.remove('cross_validation.png')
            except OSError:
                pass
    #================================================================================================
    # Independent dataset test.
    #================================================================================================
    if 'independent_file_list' in locals().keys():
        print 'The independent test dataset is found.\n'
        test_y = []
        test_x = []
        for file in independent_file_list:
            y, x = svm_read_problem(file)
            test_y.extend(y)
            test_x.extend(x)
        model = svm_load_model(middle_model_file)
        p_label, p_acc, p_val = svm_predict(test_y, test_x, model, predict_params)
        labels = model.get_labels()
        deci = [labels[0] * val[0] for val in p_val]
        check_gnuplot_exe()
        roc_output = 'independent_roc.png'
        title = 'the test dataset'
        evals = performance(test_y, p_label, deci, roc_output, title, True, bi_or_multi)
        if bi_or_multi == 0:
            dest_file = current_path + const.FINAL_RESULTS_PATH + roc_output
            copy_file(roc_output, dest_file)
            print 'The performance evaluations of the final model are as follows:\n'
            print 'ACC = %.4f' % evals[0]
            print 'MCC = %.4f' % evals[1]
            print 'AUC = %.4f' % evals[2]
            print 'Sn = %.4f' % evals[3]
            print 'Sp = %.4f\n' % evals[4]
            print "The ROC curve has been saved. You can check it here: "
            if sys.platform.startswith('win'):
                print dest_file.replace('/', '\\'), '\n'
            else:
                print dest_file.replace('\\', '/'), '\n'
            if os.path.isfile('independent_roc.png'):
                try:
                    os.remove('independent_roc.png')
                except OSError:
                    time.sleep(0.1)
                    try:
                        os.remove('independent_roc.png')
                    except OSError:
                        pass
        elif bi_or_multi == 1:
            print 'The performance evaluations of the final model are as follows:\n'
            print 'ACC = %.4f' % evals
            print '\n'
    print 'Done.'
    print 'Used time: %.2fs' % (time.time() - start_time)
params['test_episodes'] = args.episodes
params['random_seed'] = seed_generator(params['random_seed'], params['runs'])
params['start_time'] = start_time

# Set up directory structure if training
#if params['mode'] == 'train':
# Create experiment dir
params['exp_dir'] = create_dir(
    Path(os.path.join(
        LOG_DIR, params['env_type'], params['env_name'],
        str(time.strftime("%Y-%m-%d_%H-%M")))))
# Save experiment parameters to log dir
copy_file(
    PARAMS_FILE,
    str(Path(os.path.join(params['exp_dir'], args.params))))
# Add commit version for reproducibility
label = subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode('ascii').strip()
f = open(os.path.join(params['exp_dir'], 'RL2go_commit-version.txt'), 'w+')
f.write('{}\r\n'.format(label))
f.close()

# Prepare the tensorflow configuration settings
# TODO: Make better suitable for multicore processing
params['cores'] = 1
#params['gpus'] = len(get_available_gpus()) if params['use_gpu'] else 0
params['gpus'] = 1 if params['use_gpu'] else 0
params['tf_config'] = tf.ConfigProto(
    intra_op_parallelism_threads=params['cores'],
    inter_op_parallelism_threads=params['cores'],
def cross_validation(label_list, vector_list, fold, svm_params, predict_params, bi_or_multi):
    """Do cross validation.

    :param label_list: list of labels.
    :param vector_list: list of vectors.
    :param fold: the fold of cross validation.
    """
    datasetsize = len(label_list)
    result = dataset_split_cv(label_list, vector_list, fold)
    if result == False:
        return False
    else:
        split_vector_list, split_label_list = result
    len_vector = len(split_vector_list)
    len_label = len(split_label_list)
    if len_vector != len_label:
        print 'Error: The length of the labels is not equal to that of the vectors.'
        return False
    deci = []
    original_labels = []
    acc_list = []
    mcc_list = []
    auc_list = []
    sn_list = []
    sp_list = []
    if bi_or_multi == 0:
        if fold != datasetsize:
            for i in range(len_vector):
                train_vector_list = []
                train_label_list = []
                test_vector_list = split_vector_list[i]
                test_label_list = split_label_list[i]
                original_labels.extend(test_label_list)
                for j in range(len_vector):
                    if j != i:
                        train_vector_list.extend(split_vector_list[j])
                        train_label_list.extend(split_label_list[j])
                m = svm_train(train_label_list, train_vector_list, svm_params)
                p_label, p_acc, p_val = svm_predict(test_label_list, test_vector_list, m, predict_params)
                labels = m.get_labels()
                subdeci = [labels[0] * val[0] for val in p_val]
                deci += subdeci
                evals = performance(test_label_list, p_label, subdeci, bi_or_multi=bi_or_multi)
                acc_list.append(evals[0])
                mcc_list.append(evals[1])
                auc_list.append(evals[2])
                sn_list.append(evals[3])
                sp_list.append(evals[4])
            acc_average = sum(acc_list) / len(acc_list)
            mcc_average = sum(mcc_list) / len(mcc_list)
            auc_average = sum(auc_list) / len(auc_list)
            sn_average = sum(sn_list) / len(sn_list)
            sp_average = sum(sp_list) / len(sp_list)
            label_all = []
            for i in split_label_list:
                label_all.extend(i)
            check_gnuplot_exe()
            roc_output = 'cross_validation.png'
            title = 'cross validation'
            current_path = os.path.dirname(os.path.realpath(__file__))
            roc_data_file = current_path + const.GEN_FILE_PATH + 'roc_data'
            plot_roc(deci, label_all, roc_output, title, True, roc_data_file)
            del_file(roc_data_file)
            dest_file = current_path + const.FINAL_RESULTS_PATH + roc_output
            copy_file(roc_output, dest_file)
        elif fold == datasetsize:
            predicted_labels = []
            for i in range(len_vector):
                train_vector_list = []
                train_label_list = []
                test_vector_list = split_vector_list[i]
                test_label_list = split_label_list[i]
                original_labels.extend(test_label_list)
                for j in range(len_vector):
                    if j != i:
                        train_vector_list.extend(split_vector_list[j])
                        train_label_list.extend(split_label_list[j])
                m = svm_train(train_label_list, train_vector_list, svm_params)
                p_label, p_acc, p_val = svm_predict(test_label_list, test_vector_list, m, predict_params)
                labels = m.get_labels()
                subdeci = [labels[0] * val[0] for val in p_val]
                deci += subdeci
                predicted_labels.extend(p_label)
            evals = performance(original_labels, predicted_labels, deci, bi_or_multi=bi_or_multi)
            acc_average = evals[0]
            mcc_average = evals[1]
            auc_average = evals[2]
            sn_average = evals[3]
            sp_average = evals[4]
            label_all = []
            for i in split_label_list:
                label_all.extend(i)
            check_gnuplot_exe()
            roc_output = 'cross_validation.png'
            title = 'cross validation'
            current_path = os.path.dirname(os.path.realpath(__file__))
            roc_data_file = current_path + const.GEN_FILE_PATH + 'roc_data'
            plot_roc(deci, label_all, roc_output, title, True, roc_data_file)
            del_file(roc_data_file)
            dest_file = current_path + const.FINAL_RESULTS_PATH + roc_output
            copy_file(roc_output, dest_file)
        acc_re = 'ACC = %.4f' % acc_average
        mcc_re = 'MCC = %.4f' % mcc_average
        auc_re = 'AUC = %.4f' % auc_average
        sn_re = 'Sn = %.4f' % sn_average
        sp_re = 'Sp = %.4f\n' % sp_average
        eval_re = [acc_re, mcc_re, auc_re, sn_re, sp_re]
        print('The cross validation results are as follows:')
        print acc_re
        print mcc_re
        print auc_re
        print sn_re
        print sp_re
        print "The ROC curve has been saved. You can check it here: "
        if sys.platform.startswith('win'):
            print dest_file.replace('/', '\\'), '\n'
        else:
            print dest_file.replace('\\', '/'), '\n'
        result_file = current_path + const.FINAL_RESULTS_PATH + "cv_eval_results.txt"
        with open(result_file, 'w') as f:
            f.write('The cross validation results are as follows:\n')
            for i in eval_re:
                f.write(i)
                f.write("\n")
        prob_file = current_path + const.FINAL_RESULTS_PATH + "probability_values.txt"
        with open(prob_file, 'w') as f:
            for i, j in zip(original_labels, deci):
                f.write(str(i))
                f.write('\t')
                f.write(str(j))
                f.write("\n")
    elif bi_or_multi == 1:
        if fold != datasetsize:
            for i in range(len_vector):
                train_vector_list = []
                train_label_list = []
                test_vector_list = split_vector_list[i]
                test_label_list = split_label_list[i]
                for j in range(len_vector):
                    if j != i:
                        train_vector_list.extend(split_vector_list[j])
                        train_label_list.extend(split_label_list[j])
                m = svm_train(train_label_list, train_vector_list, svm_params)
                p_label, p_acc, p_val = svm_predict(test_label_list, test_vector_list, m, predict_params)
                labels = m.get_labels()
                subdeci = [labels[0] * val[0] for val in p_val]
                deci += subdeci
                evals = performance(test_label_list, p_label, subdeci, bi_or_multi=bi_or_multi)
                acc_list.append(evals)
            acc_average = sum(acc_list) / len(acc_list)
        elif fold == datasetsize:
            predicted_labels = []
            original_labels = []
            for i in range(len_vector):
                train_vector_list = []
                train_label_list = []
                test_vector_list = split_vector_list[i]
                test_label_list = split_label_list[i]
                original_labels.extend(test_label_list)
                for j in range(len_vector):
                    if j != i:
                        train_vector_list.extend(split_vector_list[j])
                        train_label_list.extend(split_label_list[j])
                m = svm_train(train_label_list, train_vector_list, svm_params)
                p_label, p_acc, p_val = svm_predict(test_label_list, test_vector_list, m, predict_params)
                labels = m.get_labels()
                subdeci = [labels[0] * val[0] for val in p_val]
                deci += subdeci
                predicted_labels.extend(p_label)
            evals = performance(original_labels, predicted_labels, deci, bi_or_multi=bi_or_multi)
            acc_average = evals
        print('The cross validation results are as follows:')
        print 'ACC = %.4f' % acc_average

#def cv_jackknife(label_list, vector_list, fold, svm_params, predict_params, bi_or_multi):
    """Do jackknife cross validation.
def copy_runway(self, template):
    """Copy runway template to proper directory."""
    template_file = os.path.join(self.fixtures_dir, 'runway-{}.yml'.format(template))
    copy_file(template_file, os.path.join(self.sources_test_dir, 'runway.yml'))
def test_copy_file():
    ret = util.copy_file('a.txt', 'a1.txt')
    return 'copy OK: ' + str(ret)
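# util.copy_file itself is not shown in these snippets; most call sites pass a
# source and a destination (sometimes a directory, as in
# sample_files_from_directory above). A minimal sketch of a compatible helper,
# assuming shutil semantics (an assumption, not the actual util module):
import shutil


def copy_file(source, destination):
    # shutil.copy accepts either a file path or a directory as the
    # destination and returns the path of the new copy, which matches
    # the str(ret) usage in test_copy_file above.
    return shutil.copy(source, destination)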
    [
        os.path.join(util.get_script_path(), "data", args.ide, "vmoptions.README"),
        os.path.join(util.get_script_path(), "tmp", "root", "etc", args.ide,
                     "%s.vmoptions.README" % args.ide)
    ],
    [
        os.path.join(util.get_script_path(), "data", args.ide, "debian", "sysctl-99.conf"),
        os.path.join(util.get_script_path(), "tmp", "root", "etc", "sysctl.d",
                     "99-%s.conf" % args.ide)
    ],
]
for copyTuple in copyList:
    if not util.copy_file(copyTuple[0], copyTuple[1], logger):
        cleanup(-1, logger)

# Fixing vmoptions file(s)
file1 = open(
    os.path.join(util.get_script_path(), "tmp", "root", "etc", args.ide,
                 "%s.vmoptions.README" % args.ide), "a")
file2 = open(
    os.path.join(util.get_script_path(), "tmp", "root", "usr", "share",
                 "jetbrains", args.ide, "bin", "%s.vmoptions" % args.ide), "r")
file3 = open(
    os.path.join(util.get_script_path(), "tmp", "root", "usr", "share",
                 "jetbrains", args.ide, "bin", "%s.vmoptions2" % args.ide), "w")
file1.write("\nOriginal pycharm.vmoptions:\n")
for line in file2:
def run_tofino_test(out_dir, p4_input, stf_file_name):
    # we need to change the working directory
    # tofino scripts make some assumptions where to dump files
    prog_name = p4_input.stem
    # we need to create a specific test dir in which we can run tests
    test_dir = out_dir.joinpath("test_dir")
    util.check_dir(test_dir)
    util.copy_file(stf_file_name, test_dir)
    template_name = test_dir.joinpath(f"{prog_name}.py")
    # use a test template that runs stf tests
    util.copy_file(f"{FILE_DIR}/tofino_test_template.py", template_name)

    # initialize the target install
    log.info("Building the tofino target...")
    config_cmd = f"{TOFINO_DIR}/pkgsrc/p4-build/configure "
    config_cmd += "--with-tofino --with-p4c=bf-p4c "
    config_cmd += f"--prefix={TOFINO_DIR}/install "
    config_cmd += f"--bindir={TOFINO_DIR}/install/bin "
    config_cmd += f"P4_NAME={prog_name} "
    config_cmd += f"P4_PATH={p4_input.resolve()} "
    config_cmd += "P4_VERSION=p4-16 "
    config_cmd += "P4_ARCHITECTURE=tna "
    result = util.exec_process(config_cmd, cwd=out_dir)
    if result.returncode != util.EXIT_SUCCESS:
        return result, result.stdout, result.stderr
    # create the target
    make_cmd = f"make -C {out_dir} "
    result = util.exec_process(make_cmd)
    if result.returncode != util.EXIT_SUCCESS:
        return result, result.stdout, result.stderr
    # install the target in the tofino folder
    make_cmd = f"make install -C {out_dir} "
    result = util.exec_process(make_cmd)
    if result.returncode != util.EXIT_SUCCESS:
        return result, result.stdout, result.stderr

    procs = []
    test_proc = None
    # start the target in the background
    log.info("Starting the tofino model...")
    os_env = os.environ.copy()
    os_env["SDE"] = f"{TOFINO_DIR}"
    os_env["SDE_INSTALL"] = f"{TOFINO_DIR}/install"
    model_cmd = f"{TOFINO_DIR}/run_tofino_model.sh "
    model_cmd += f"-p {prog_name} "
    proc = util.start_process(model_cmd, preexec_fn=os.setsid,
                              env=os_env, cwd=out_dir)
    procs.append(proc)
    # start the binary for the target in the background
    log.info("Launching switchd...")
    os_env = os.environ.copy()
    os_env["SDE"] = f"{TOFINO_DIR}"
    os_env["SDE_INSTALL"] = f"{TOFINO_DIR}/install"
    switch_cmd = f"{TOFINO_DIR}/run_switchd.sh "
    switch_cmd += "--arch tofino "
    switch_cmd += f"-p {prog_name} "
    proc = util.start_process(switch_cmd, preexec_fn=os.setsid,
                              env=os_env, cwd=out_dir)
    procs.append(proc)

    # wait for a bit
    time.sleep(2)
    # finally we can run the test
    log.info("Running the actual test...")
    test_cmd = f"{TOFINO_DIR}/run_p4_tests.sh "
    test_cmd += f"-t {test_dir} "
    os_env = os.environ.copy()
    os_env["SDE"] = f"{TOFINO_DIR}"
    os_env["SDE_INSTALL"] = f"{TOFINO_DIR}/install"
    # inserting this path is necessary for the tofino_test_template.py
    os_env["PYTHONPATH"] = f"${{PYTHONPATH}}:{ROOT_DIR}"
    test_proc = util.start_process(test_cmd, env=os_env, cwd=out_dir)

    def signal_handler(sig, frame):
        log.warning("run_tofino_test: Caught Interrupt, exiting...")
        cleanup(procs)
        os.kill(test_proc.pid, signal.SIGINT)
        os.kill(test_proc.pid, signal.SIGTERM)
        sys.exit(1)

    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)
    stdout, stderr = test_proc.communicate()
    cleanup(procs)
    return test_proc, stdout, stderr
def setup_greasemonkey(self):
    path_gm = os.path.join(self.profiledir, 'gm_scripts')
    os.mkdir(path_gm)
    util.copy_file(os.path.join(self.browserdir, 'gm_config.xml'),
                   os.path.join(path_gm, 'config.xml'))
    util.copy_file(os.path.join(self.distdir, 'pixplus.user.js'), path_gm)
    self.user_prefs['extensions.greasemonkey.stats.prompted'] = 'true'
def setup_scriptish(self):
    path_st = os.path.join(self.profiledir, 'scriptish_scripts')
    os.mkdir(path_st)
    util.copy_file(os.path.join(self.browserdir, 'scriptish-config.json'), path_st)
    util.copy_file(os.path.join(self.distdir, 'pixplus.user.js'), path_st)
    os.utime(os.path.join(path_st, 'pixplus.user.js'), (2000000000, 2000000000))
def copy_runway(self, template):
    """Copy runway template to proper directory."""
    template_file = os.path.join(self.template_dir, 'runway-{}.yml'.format(template))
    copy_file(template_file, os.path.join(self.base_dir, 'runway.yml'))
def copy_template(self, template, name='main.tf'):
    """Copy template to Terraform module folder."""
    template_file = os.path.join(self.template_dir, template)
    copy_file(template_file, os.path.join(self.tf_test_dir, name))
# Paths that contain templates, relative to this directory.
templates_path = ['html']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8'

# The master toctree document.
#master_doc = 'index'

# create content template for the homepage
from util import rst2html, copy_file

readme = rst2html('../README.txt', 'html/intro.html')
changelog = copy_file('../CHANGES.txt', 'changelog.rst')

# Location of the PyAMF source root folder.
import pyamf

# General substitutions.
project = 'PyAMF'
url = 'http://pyamf.org'
description = 'AMF for Python'
copyright = "Copyright © 2007-%s The <a href='%s'>%s</a> Project. All rights reserved." % (
    time.strftime('%Y'), url, project)

# We look for the __init__.py file in the current PyAMF source tree
# and replace the values accordingly.
#
# The full version, including alpha/beta/rc tags.
def main(args):
    """The main process of predict.

    :param args: an object of the arguments.
    """
    start_time = time.time()
    current_path = os.path.dirname(os.path.realpath(__file__))
    # Judge whether the path contains Chinese characters or not.
    current_path_uni = unicode(current_path, "gbk")
    if check_contain_chinese(current_path_uni):
        print 'Error: the path can not contain Chinese characters.'
        return False
    #================================================================================================
    # Inputfile preprocess.
    #================================================================================================
    inputfile = args.inputfile
    result = data_preprocess(inputfile, const.TEMP_DIR)
    if result == False:
        print 'The input file does not satisfy the LIBSVM format.'
        return False
    else:
        new_inputfile = result
    if args.m is None:
        print 'Error: the name of the model can not be omitted.'
        print 'A value should be given to the parameter -m.'
        return False
    else:
        model_name = args.m
    model_file = current_path + const.FINAL_RESULTS_PATH + model_name
    if args.o is None:
        output_name = 'output_labels.txt'
    else:
        output_name = args.o
    output = current_path + const.FINAL_RESULTS_PATH + output_name
    #================================================================================================
    # Processing the model file generated in the train step.
    #================================================================================================
    param_dict = dict()
    model_list = []
    with open(model_file) as f:
        train_params = f.readline().strip()
        for line in f:
            model_list.append(line)
    svm_model_file = current_path + const.FINAL_RESULTS_PATH + 'svm_model.model'
    with open(svm_model_file, 'w') as f:
        for i in model_list:
            f.write(i)
    param_list = train_params.split(',')
    for index in range(0, len(param_list), 2):
        param_dict[param_list[index]] = param_list[index + 1]
    if ('c' in param_dict.keys() and 'g' in param_dict.keys()
            and 'b' in param_dict.keys() and 'bi_or_multi' in param_dict.keys()):
        c = int(param_dict['c'])
        g = int(param_dict['g'])
        b = param_dict['b']
        bi_or_multi = int(param_dict['bi_or_multi'])
        print 'The parameters of RBF kernel:'
        print 'c = ', c, ' g = ', g
    #================================================================================================
    # Predicting process.
    #================================================================================================
    label_list = []
    if args.labels is not None:
        with open(args.labels) as f:
            for i in f:
                if i.strip() == '+1':
                    label_list.append(1.0)
                elif i.strip() == '-1':
                    label_list.append(-1.0)
                else:
                    label_list.append(int(i.strip()))
    predict_params = '-q'
    if b == '1':
        predict_params += (' -b ' + b)
    y, x = svm_read_problem(new_inputfile)
    model = svm_load_model(svm_model_file)
    model_labels = model.get_labels()
    p_label, p_acc, p_val = svm_predict(y, x, model, predict_params)
    if bi_or_multi == 0:
        with open(output, 'w') as f:
            for i in p_label:
                if i == 1.0:
                    f.write('+1')
                if i == -1.0:
                    f.write('-1')
                f.write('\n')
        if len(label_list) != 0:
            check_gnuplot_exe()
            deci = [model_labels[0] * val[0] for val in p_val]
            roc_output = 'predicted_roc.png'
            title = 'the predicted dataset'
            evals = performance(label_list, p_label, deci, roc_output, title, True, bi_or_multi)
            dest_file = current_path + const.FINAL_RESULTS_PATH + roc_output
            copy_file(roc_output, dest_file)
            print 'The performance evaluations are as follows:\n'
            print 'ACC = %.4f' % evals[0]
            print 'MCC = %.4f' % evals[1]
            print 'AUC = %.4f' % evals[2]
            print 'Sn = %.4f' % evals[3]
            print 'Sp = %.4f\n' % evals[4]
            if evals[2] != 0:
                print "The ROC curve has been saved. You can check it here: "
                if sys.platform.startswith('win'):
                    print dest_file.replace('/', '\\'), '\n'
                else:
                    print dest_file.replace('\\', '/'), '\n'
            if os.path.isfile('predicted_roc.png'):
                try:
                    os.remove('predicted_roc.png')
                except OSError:
                    time.sleep(0.1)
                    try:
                        os.remove('predicted_roc.png')
                    except OSError:
                        pass
    elif bi_or_multi == 1:
        with open(output, 'w') as f:
            for i in p_label:
                f.write(str(i))
                f.write('\n')
        if len(label_list) != 0:
            deci = [model_labels[0] * val[0] for val in p_val]
            roc_output = 'predicted_roc.png'
            title = 'the predicted dataset'
            evals = performance(label_list, p_label, deci, roc_output, title, True, bi_or_multi)
            print 'The performance evaluation is as follows:\n'
            print 'ACC = %.4f' % evals
    print "The predicted labels have been saved. You can check them here: "
    if sys.platform.startswith('win'):
        print output.replace('/', '\\'), '\n'
    else:
        print output.replace('\\', '/'), '\n'
    print("Done.")
    print("Used time: %.2fs" % (time.time() - start_time))
def test_delete():
    util.copy_file('a.txt', 'a.txt.bak')
    util.copy_dir('d1', 'd1_bak')
    util.delete('a.txt')
    util.delete('d1', force=True)
    return 'delete OK'
def test_delete_file():
    util.copy_file('a.txt', 'a.txt.bak')
    util.delete_file('a.txt')
    return 'delete OK'
def convert_font():
    logging.info("convert font....")
    raw_font_path = os.path.join(config.RAW_RESOURCE_PATH, "common", "font", "font")
    bin_font_path = os.path.join(config.BIN_RESOURCE_PATH, "font")
    util.copy_file(raw_font_path, bin_font_path)
else:
    if len(new_recordings) == 1:
        print "Copying 1 new recording to host."
    else:
        print "Copying %d new recordings to host." \
              % (len(new_recordings),)
    i = 1
    for neuros_trackname in new_recordings:
        sourcename = neuros.neurospath_to_hostpath(neuros_trackname)
        basename = path.basename(sourcename)
        targetname = path.join(config.recordingdir, basename)
        print "  %d. %s..." % (i, basename)
        i += 1
        util.copy_file(sourcename, targetname)
        config.add_recording(neuros_trackname.lower())

# Only pack when necessary
if audio_db.count_deleted() > 0:
    print "  Packing audio database...",
    audio_db.pack()
    print " Done."

if config.sort_database:
    audio_db.sort(path.join(*neuros.mountpoint_parts + [neuros.DB_DIR, 'tracks.txt']))

neuros.close_db("audio")
    cleanup(-1, logger)

# Copy Files
copyList = [[os.path.join(util.get_script_path(), "data", args.ide, "start.sh"),
             os.path.join(util.get_script_path(), "tmp", "root", "usr", "bin", args.ide)],
            [os.path.join(util.get_script_path(), "data", args.ide, "icon.desktop"),
             os.path.join(util.get_script_path(), "tmp", "root", "usr", "share",
                          "applications", "%s.desktop" % args.ide)],
            [os.path.join(util.get_script_path(), "data", args.ide, "vmoptions.README"),
             os.path.join(util.get_script_path(), "tmp", "root", "etc", args.ide,
                          "%s.vmoptions.README" % args.ide)],
            [os.path.join(util.get_script_path(), "data", args.ide, "debian", "sysctl-99.conf"),
             os.path.join(util.get_script_path(), "tmp", "root", "etc", "sysctl.d",
                          "99-%s.conf" % args.ide)]]
for copyTuple in copyList:
    if not util.copy_file(copyTuple[0], copyTuple[1], logger):
        cleanup(-1, logger)

# Fixing vmoptions file(s)
file1 = open(os.path.join(util.get_script_path(), "tmp", "root", "etc", args.ide,
                          "%s.vmoptions.README" % args.ide), "a")
file2 = open(os.path.join(util.get_script_path(), "tmp", "root", "usr", "share",
                          "jetbrains", args.ide, "bin", "%s.vmoptions" % args.ide), "r")
file3 = open(os.path.join(util.get_script_path(), "tmp", "root", "usr", "share",
                          "jetbrains", args.ide, "bin", "%s.vmoptions2" % args.ide), "w")
file1.write("\nOriginal pycharm.vmoptions:\n")
for line in file2:
    file1.write(line)
    # Strip the YourKit profiler agent from the active vmoptions file.
    if "yjpagent" not in line:
        file3.write(line)
file1.close()
def main():
    torch.multiprocessing.set_sharing_strategy('file_system')

    print('[RUN] parse arguments')
    args, framework, optimizer, data_loader_dict, tester_dict = option.parse_options()

    print('[RUN] create result directories')
    result_dir_dict = util.create_result_dir(args.result_dir, ['src', 'log', 'snapshot', 'test'])
    util.copy_file(args.bash_file, args.result_dir)
    util.copy_dir('./src', result_dir_dict['src'])

    print('[RUN] create loggers')
    train_log_dir = os.path.join(result_dir_dict['log'], 'train')
    train_logger = SummaryWriter(train_log_dir)

    print('[OPTIMIZER] learning rate:', optimizer.param_groups[0]['lr'])

    n_batches = data_loader_dict['train'].__len__()
    global_step = args.training_args['init_iter']
    print('')

    skip_flag = False
    while True:
        start_time = time.time()
        for train_data_dict in data_loader_dict['train']:
            batch_time = time.time() - start_time

            if skip_flag:
                skip_flag = False
            else:
                if global_step in args.snapshot_iters:
                    snapshot_dir = os.path.join(result_dir_dict['snapshot'], '%07d' % global_step)
                    util.save_snapshot(framework.network, optimizer, snapshot_dir)

                if global_step in args.test_iters:
                    test_dir = os.path.join(result_dir_dict['test'], '%07d' % global_step)
                    util.run_testers(tester_dict, framework, data_loader_dict['test'], test_dir)

                if args.training_args['max_iter'] <= global_step:
                    break

                if global_step in args.training_args['lr_decay_schd'].keys():
                    util.update_learning_rate(optimizer, args.training_args['lr_decay_schd'][global_step])

            train_loss_dict, train_time = \
                train_network_one_step(args, framework, optimizer, train_data_dict, global_step)

            if train_loss_dict is None:
                # The step failed; skip the bookkeeping and retry on the next batch.
                skip_flag = True
                train_data_dict.clear()
                del train_data_dict
            else:
                if global_step % args.training_args['print_intv'] == 0:
                    iter_str = '[TRAINING] %d/%d:' % (global_step, args.training_args['max_iter'])
                    info_str = 'n_batches: %d, batch_time: %0.3f, train_time: %0.3f' % \
                               (n_batches, batch_time, train_time)
                    train_str = util.cvt_dict2str(train_loss_dict)
                    print(iter_str + '\n- ' + info_str + '\n- ' + train_str + '\n')

                for key, value in train_loss_dict.items():
                    train_logger.add_scalar(key, value, global_step)

                train_loss_dict.clear()
                train_data_dict.clear()
                del train_loss_dict, train_data_dict
                global_step += 1

            start_time = time.time()

        if args.training_args['max_iter'] <= global_step:
            break

    train_logger.close()
def cross_validation(label_list, vector_list, fold, svm_params, predict_params, bi_or_multi):
    """Do cross validation.

    :param label_list: list of labels.
    :param vector_list: list of vectors.
    :param fold: the fold of cross validation.
    """
    result = dataset_split_cv(label_list, vector_list, fold)
    if result == False:
        return False
    else:
        split_vector_list, split_label_list = result
    len_vector = len(split_vector_list)
    len_label = len(split_label_list)
    if len_vector != len_label:
        print 'Error: The length of the labels is not equal to that of the vectors.'
        return False
    deci = []
    acc_list = []
    mcc_list = []
    auc_list = []
    sn_list = []
    sp_list = []
    if bi_or_multi == 0:
        for i in range(len_vector):
            train_vector_list = []
            train_label_list = []
            test_vector_list = split_vector_list[i]
            test_label_list = split_label_list[i]
            for j in range(len_vector):
                if j != i:
                    train_vector_list.extend(split_vector_list[j])
                    train_label_list.extend(split_label_list[j])
            m = svm_train(train_label_list, train_vector_list, svm_params)
            p_label, p_acc, p_val = svm_predict(test_label_list, test_vector_list, m, predict_params)
            labels = m.get_labels()
            subdeci = [labels[0] * val[0] for val in p_val]
            deci += subdeci
            evals = performance(test_label_list, p_label, subdeci, bi_or_multi=bi_or_multi)
            acc_list.append(evals[0])
            mcc_list.append(evals[1])
            auc_list.append(evals[2])
            sn_list.append(evals[3])
            sp_list.append(evals[4])
        acc_average = sum(acc_list) / len(acc_list)
        mcc_average = sum(mcc_list) / len(mcc_list)
        auc_average = sum(auc_list) / len(auc_list)
        sn_average = sum(sn_list) / len(sn_list)
        sp_average = sum(sp_list) / len(sp_list)
        label_all = []
        for i in split_label_list:
            label_all.extend(i)
        check_gnuplot_exe()
        roc_output = 'cross_validation.png'
        title = 'cross validation'
        current_path = os.path.dirname(os.path.realpath(__file__))
        roc_data_file = current_path + const.GEN_FILE_PATH + 'roc_data'
        plot_roc(deci, label_all, roc_output, title, True, roc_data_file)
        del_file(roc_data_file)
        dest_file = current_path + const.FINAL_RESULTS_PATH + roc_output
        copy_file(roc_output, dest_file)
        print('The cross validation results are as follows:')
        print 'ACC = %.4f' % acc_average
        print 'MCC = %.4f' % mcc_average
        print 'AUC = %.4f' % auc_average
        print 'Sn = %.4f' % sn_average
        print 'Sp = %.4f\n' % sp_average
        print "The ROC curve has been saved. You can check it here: "
        if sys.platform.startswith('win'):
            print dest_file.replace('/', '\\'), '\n'
        else:
            print dest_file.replace('\\', '/'), '\n'
    elif bi_or_multi == 1:
        for i in range(len_vector):
            train_vector_list = []
            train_label_list = []
            test_vector_list = split_vector_list[i]
            test_label_list = split_label_list[i]
            for j in range(len_vector):
                if j != i:
                    train_vector_list.extend(split_vector_list[j])
                    train_label_list.extend(split_label_list[j])
            m = svm_train(train_label_list, train_vector_list, svm_params)
            p_label, p_acc, p_val = svm_predict(test_label_list, test_vector_list, m, predict_params)
            labels = m.get_labels()
            subdeci = [labels[0] * val[0] for val in p_val]
            deci += subdeci
            evals = performance(test_label_list, p_label, subdeci, bi_or_multi=bi_or_multi)
            acc_list.append(evals)
        acc_average = sum(acc_list) / len(acc_list)
        print('The cross validation results are as follows:')
        print 'ACC = %.4f' % acc_average