def main(args):
    tsrc_distribution = pkg_resources.get_distribution("tsrc")  # pylint: disable=no-member
    version = tsrc_distribution.version
    location = path.Path(tsrc_distribution.location)
    dirty = False
    short_hash = None
    rc, out = tsrc.git.run_git(location, "rev-parse", "--short", "HEAD", raises=False)
    if rc == 0:
        short_hash = out
        dirty = tsrc.git.is_dirty(location)
    message = "tsrc version %s" % version
    if short_hash:
        message += " - git: %s" % short_hash
        if dirty:
            message += " (dirty)"
    ui.info(message)

def run_from_config(config_dir, gpu_devices, script_name):
    """Call a script using parameters from an input JSON config file.

    CUDA_VISIBLE_DEVICES is set using the gpu_devices parameter passed to this
    Python script.
    """
    config_dir = path.Path(config_dir)
    with open(config_dir / 'config.json', 'r') as f:
        config = json.load(f)

    config_string = ""
    for option in config:
        # NOTE(brendan): Booleans are passed as --no-<option-name> for False,
        # and --<option-name> for True.
        if isinstance(config[option], bool):
            yes_no = '' if config[option] else 'no-'
            config_string += ' --' + yes_no + option
        elif isinstance(config[option], list):
            config_string += ' --' + option
            for thing in config[option]:
                config_string += ' ' + str(thing)
        elif config[option] is not None:
            config_string += ' --' + option + ' ' + str(config[option])

    cuda_visible_devices = 'CUDA_VISIBLE_DEVICES='
    if gpu_devices == 'CPU':
        gpu_devices = ''
    else:
        gpu_warn = 'gpu_devices should be of the format <int>(,<int>)*'
        assert (re.match(r'^\d(,\d)*$', gpu_devices) is not None), gpu_warn
    cuda_visible_devices += gpu_devices

    if script_name.endswith('.py'):
        cmd = f'{cuda_visible_devices} python3 {script_name} {config_string}'
    else:
        cmd = f'{cuda_visible_devices} python3 -m {script_name} {config_string}'

    print(cmd)
    os.system(cmd)

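# Hedged illustration (not from the original project) of how run_from_config
# above turns a config file into CLI flags, assuming a hypothetical
# experiments/exp1/config.json containing
#   {"use_gpu": false, "layers": [64, 32], "lr": 0.001, "resume": null}
#   booleans -> " --no-use_gpu"      (False gets the --no- prefix)
#   lists    -> " --layers 64 32"    (each element appended)
#   None     -> skipped              ("resume" produces no flag)
# so run_from_config('experiments/exp1', '0,1', 'train.py') would roughly run:
#   CUDA_VISIBLE_DEVICES=0,1 python3 train.py --no-use_gpu --layers 64 32 --lr 0.001
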
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument("--nogui", action="store_true", help="no gui")
    args = parser.parse_args()

    if args.nogui:
        connection_method = pybullet.DIRECT
    else:
        connection_method = pybullet.GUI

    now = datetime.datetime.utcnow()
    timestamp = now.strftime("%Y%m%d_%H%M%S.%f")
    root_dir = morefusion.utils.get_data_path(
        f"wkentaro/morefusion/ycb_video/synthetic_data/{timestamp}")
    root_dir = path.Path(root_dir)

    n_video = 1200
    for index in range(1, n_video + 1):
        video_dir = root_dir / f"{index:08d}"
        random_state = np.random.RandomState(index)
        generate_a_video(video_dir, random_state, connection_method)

def _prepare_upload(self):
    session = self.account.http_settings.session
    proxy = self.account.http_settings.proxy
    if not self.account.is_logined():
        raise NotLogined('YouPorn account is not logined')
    upload_page = session.get('http://www.youporn.com/upload', proxies=proxy)
    doc = etree.fromstring(upload_page.content, HTMLParser())
    callback_url = doc.xpath('//input[@name="callbackUrl"]/@value')[0]
    session.headers.update({
        "X-Requested-With": "XMLHttpRequest",
        "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8"
    })
    video_size = os.path.getsize(self.video_upload_request.video_file)
    video = path.Path(self.video_upload_request.video_file)
    post = {'file': video.name, 'size': video_size}
    create_upload_request = session.post(
        'http://www.youporn.com/upload/create-videos/',
        data=post, proxies=proxy)
    response = json.loads(create_upload_request.content)
    if 'success' in response:
        if not response['success']:
            raise FailedUpload(
                'Failed to upload video reason:{reason}'.format(
                    reason=response['reason']))
    del session.headers['X-Requested-With']
    response['callback_url'] = callback_url
    return response

def test_filter_daophot_out(self):
    with path.Path(self.test_dir):
        print("Files before: ", glob.glob("*"))
        prefix = "20180611T055249_053"
        file_in = prefix + ".coo"
        file_out = prefix + ".lst"
        xmin, xmax, ymin, ymax = 5, 45, 5, 45
        daophot.filter_daophot_out(file_in, file_out, xmin, xmax, ymin, ymax)
        print("Files after: ", glob.glob("*"))

        # Make sure the output file exists.
        self.assertTrue(os.path.exists(file_out))

        # Check that only x, y values inside xmin,xmax and ymin,ymax remain.
        with open(file_out) as f:
            ll = f.readlines()
        for l in ll[3:]:
            tt = l.split()
            x, y = float(tt[1]), float(tt[2])
            self.assertTrue(x > xmin)
            self.assertTrue(x < xmax)
            self.assertTrue(y > ymin)
            self.assertTrue(y < ymax)

def test(): print("lane detector test") img_files = path.Path("./data/cut").glob("*") num_imgs = len(img_files) print("Total %d imgs" % num_imgs) imgs = list( map(lambda x: cv2.imread(str(x), cv2.COLOR_RGB2GRAY), img_files)) laneDetector = LaneDetector(config) for idx, img in enumerate(imgs): type_result = laneDetector.pcr_detect(img) print(idx, "번째 pcr:", type_result) if type_result[0][0] == 3: print(idx, " th : no parking lot") continue # type_result = [[0], [0]] result, sess = laneDetector.psd_detect(type_result, img) print(idx, "번째 psd:", result) sess.close() tf.reset_default_graph()
def __init__(self, img_file, spawningpoint):
    """
    Descr: __init__ sets initial variables and image
    Params: self
            img_file is a string containing the file name
            spawningpoint is a tuple containing coordinates
    Returns: none
    """
    pygame.sprite.Sprite.__init__(self)

    # Puts image into the invader
    image = pygame.image.load('assets/' + img_file).convert_alpha()
    self.image = pygame.transform.scale(image, (20, 20))

    # Maintains reference to rectangle/allows movement
    self.rect = self.image.get_rect()
    self.rect.x = spawningpoint[0]
    self.rect.y = spawningpoint[1]

    # Stats of the Invaders
    self.health = 100
    self.speed = 30
    self.location = 1
    self.p = path.Path()
    self.whichimage = 1

def execute(self, msg):
    """
    Core processing logic. Processors depend on one another; the dependency is
    handled by passing data through another class (data passing keeps coupling
    relatively low), which avoids the high coupling of handing the depended-on
    object directly to the other object.
    """
    if self.input.condition and not self.output(None).listdir():
        print(self.__class__.__name__)
        p2s = "====".join(self.input.path2.listdir())
        for f1_full_name in self.input.path1.listdir():
            f1name = f1_full_name.name.splitext()[0]
            f1_index = p2s.index(f1name)
            f2_full_name = self.input.path2.joinpath(
                p2s[f1_index:(f1_index + f1name.__len__())] + "_result_1.csv")
            print(f1_full_name, f2_full_name)
            df = pd.read_csv(f1_full_name).merge(
                pd.read_csv(f2_full_name), on="time10", how="inner")
            gsm = GaitStatusMarkerProcessor(ProjParaShop.STATUS_DEFINITION)
            df = gsm.process(df)
            df.to_csv(path.Path(
                ProjParaShop.FORMED_SIGNAL_MARK_PATH).joinpath(
                    f1_full_name.name), index=False)

def generate(self, m=None):
    if m is None:
        m = self.matrix
    object_path = path.Path()
    svg_parser.parse_svg_path(object_path, self.path)
    self.box = object_path.bbox()
    if VARIABLE_NAME_SPEED in self.properties:
        speed = self.properties.get(VARIABLE_NAME_SPEED)
        yield COMMAND_SET_SPEED, speed
    if VARIABLE_NAME_POWER in self.properties:
        power = self.properties.get(VARIABLE_NAME_POWER)
        yield COMMAND_SET_POWER, power
    if VARIABLE_NAME_DRATIO in self.properties:
        d_ratio = self.properties.get(VARIABLE_NAME_DRATIO)
        yield COMMAND_SET_D_RATIO, d_ratio
    plot = object_path * m
    first_point = plot.first_point
    yield COMMAND_RAPID_MOVE, first_point
    yield COMMAND_SET_STEP, 0
    yield COMMAND_MODE_COMPACT, 0
    yield COMMAND_PLOT, plot
    yield COMMAND_MODE_DEFAULT, 0
    yield COMMAND_SET_SPEED, None
    yield COMMAND_SET_D_RATIO, None

def Compute(self, x, y, currPath, desPath, theta, r, d):
    del self.path.route
    del self.path.point
    self.path = path.Path(MAXSTEER)
    p1 = path.Point(x, y)
    self.currentPath = currPath
    self.destPath = desPath
    p2 = path.Point(x + 14 * math.cos(theta + d * math.pi / 16.0),
                    y + 14 * math.sin(theta + d * math.pi / 16.0))
    self.computed = False
    for j in range(0, len(self.destPath.point) - 1):
        p3 = self.destPath.point[j]
        p4 = self.destPath.point[j + 1]
        if p4.id == r:
            if self.intersect(p1, p2, p3, p4):
                # print '{0} {1} {2} {3}'.format(1*d, math.pi/7.8, -1*d, math.pi/7.8)
                self.path.append(1 * d, math.pi / 7.8)
                self.path.append(-1 * d, math.pi / 7.8)
                self.path.computePath(p1.x, p1.y)
                self.computed = True
                break
    self.p1 = p1
    self.p2 = p2
    self.theta = theta

def eventCalculator(cls):
    """Apply different decision strategies to the data from the data source."""
    from .services import EventCalculator
    from .core_algo import StrategyResult2

    calculator = EventCalculator()
    # Location of the data to be processed.
    root_p = path.Path(
        r'E:\my_proj\fog_recognition\ExtendFoGData\fixed_data\Tuned4Model')
    data = joblib.load(root_p.joinpath('ModelParaSelector/15'))
    for k in data:
        data[k]["predict_result"].to_csv(
            root_p.joinpath(
                'ModelParaSelector/SelectedParaResult').joinpath(k))
    data_path = root_p.joinpath('ModelParaSelector/SelectedParaResult')
    calculator.set_para_with_prop({
        "strategy": StrategyResult2(),
        "data_path": data_path,
        "save_path": root_p.joinpath('EventParaSelector')
    })
    return calculator

import path
import vtk

# print(dir(path))

## Create a Path.
#
cpt1 = [2.0, 0.0, 0.0]
cpt2 = [3.0, 0.0, 0.0]
cpt3 = [4.0, 0.0, 0.0]
cpt4 = [5.0, 0.0, 0.0]

path = path.Path()
path.add_control_point(cpt1)
path.add_control_point(cpt2)
path.add_control_point(cpt3)
path.add_control_point(cpt4)
path_geom = path.get_geometry()

import cv2
import path
import numpy as np
from PreProcessamento import PreProcessamento

# Get the path to the Haar cascade file
ARQUIVO_HAARCASCADE_FRONTALFACE = path.Path(
    "resources\\haarcascade_frontalface_default.xml")


class CapturaCamera:

    def __init__(self, preProc, classificadorFaces):
        self.PreProc = preProc
        # Create a classifier based on the Haar cascade
        self.classificadorFaces = classificadorFaces

    def CapturarFrames(self):
        # Create the camera with OpenCV
        camera = cv2.VideoCapture(0)
        while (True):
            # Read a frame from the camera
            conectado, imagem = camera.read()
            # Convert to a grayscale image
            imagemCinza = cv2.cvtColor(imagem, cv2.COLOR_BGR2GRAY)
            # Get the faces detected by the Haar cascade
            # facesDetectadas = self.classificadorFaces.detectMultiScale(imagemCinza, scaleFactor=1.5, minSize=(100, 100))
            facesDetectadas = self.classificadorFaces.CapturarRosto(
                imagemCinza)
            # Draw rectangles around the faces found by the Haar cascade
            for (x, y, l, a) in facesDetectadas:

os.remove(file_name)


def check(p, KOM):
    keywords = [re.sub(r'[ -]', '_', kw.name[1:]) for kw in KOM.keywords]
    keywords = sorted(set(keywords))
    # print(keywords)
    print('Total {} keywords'.format(len(keywords)))

    pages = [fn for fn in os.listdir(p.doc) if fn.endswith('.html')]
    pages = sorted(pages)
    print('Total {} HTML pages'.format(len(pages)))
    # print(pages)

    if len(keywords) > len(pages):
        for page in pages:
            if page[:-5] in keywords:
                keywords.remove(page[:-5])
        print('Those keywords have no HTML pages:')
        print(keywords)


# Run test
if __name__ == '__main__':
    clean.screen()
    p = path.Path()
    s = settings.Settings(p)
    KOM = kom.KOM(p, s)
    # regenerate_documentation(p, KOM)
    # remove_html_trash(p, KOM)
    # remove_png_trash()
    # check(p, KOM)

def approximate_path_by_pathcount(args, result_path, source_path, ktest_tool_path):
    if(not "=" in args):
        print("Usage: python find_approx.py --approximate-path-by-pathcount=<N>")
        sys.exit()
    pathcount_threshold = int(args.split('=')[1])
    if(pathcount_threshold > 100):
        print("Path count threshold should be less than 100")
        sys.exit()

    print("Source: " + source_path)
    print("Output: " + result_path)
    print("Selected path count threshold: %.2f%%" % pathcount_threshold)

    # create all path objects
    paths = []
    input_error_repeat = 100
    scaling = 1.0
    probability_sum = 0.0
    for root, dirs, files in os.walk(result_path):
        for filename in files:
            if filename.endswith(".prob"):
                with open(result_path + "/" + filename, 'r') as fin:
                    idx = int(fin.readline().split(",")[2].strip())
                    prob = float(fin.readline().split(",")[1])
                    new_path = path.Path(idx, prob)
                    paths.append(new_path)
                    probability_sum += prob

    # Get the input variables and their types and mark those for which error is tracked
    # TODO: Handle floats converted to ints (we only need to do this handling
    # if the conversion happened in the input)
    source = open(source_path, "r")
    input_variables = []
    for line in source:
        if re.match("(.*)klee_make_symbolic(.*)", line):
            tokens = re.split(r'[(|)]|\"', line)
            input_variables.append((tokens[2], tokens[4]))
    source.close()
    # print(input_variables)

    source = open(source_path, "r")
    approximable_input = []
    for line in source:
        if re.match("(.*)klee_track_error(.*)", line):
            tokens = re.split(r'[(|)]|\"|&|,', line)
            approximable_input.append(tokens[2])
    source.close()

    # Maintain a measure of the approximability of the input
    input_approximability_count = []
    expression_count = 0
    for var in approximable_input:
        input_approximability_count.append(0)

    # Get the non-approximable input
    non_approximable_input = list(set([x[1] for x in input_variables]) - set(approximable_input))

    # sort by path probability
    paths.sort(key=lambda p: p.path_prob)

    # find approximable variables in each path
    running_path_count = 0.0
    all_variables = set()
    for p in paths:
        running_path_count += 1
        if(((running_path_count * 100) / len(paths)) > pathcount_threshold):
            break

        # Get the path condition with error
        path_condition_with_error = ""
        source = open(result_path + "/" + "test" + "{:0>6}".format(str(p.path_id)) + ".kquery_precision_error", "r")
        for line in source:
            path_condition_with_error += line.rstrip("\n\r")
            path_condition_with_error += " "
        source.close()
        path_condition_with_error = path_condition_with_error.replace("!", "not")
        path_condition_with_error = path_condition_with_error.replace(" = ", " == ")
        path_condition_with_error = path_condition_with_error.replace("&&", "and")
        path_condition_with_error = path_condition_with_error.replace(">> 0", "")
        path_condition_with_error = path_condition_with_error.replace(">> ", "/2**")
        path_condition_with_error = path_condition_with_error.replace("<< ", "*2**")

        # generate an input, for which the path condition is satisfied
        result = subprocess.run(
            [ktest_tool_path, '--write-ints',
             result_path + "/" + "test" + "{:0>6}".format(str(p.path_id)) + '.ktest'],
            stdout=subprocess.PIPE)
        output_string = result.stdout.decode('utf-8')
        tokens = re.split(r'\n|:', output_string)
        idx = 5
        num_args = int(tokens[idx].strip())
        for args in range(num_args):
            exec("%s = %d" % (tokens[idx + 3].strip().replace("'", ""), int(tokens[idx + 9].strip())))
            idx += 9

        if(not os.path.isfile(result_path + "test" + "{:0>6}".format(str(p.path_id)) + '.precision_error')):
            continue

        with open(result_path + "/" + "test" + "{:0>6}".format(str(p.path_id)) +
                  '.precision_error', 'r') as infile:
            for line in infile:
                method_name_line_tokens = line.split()
                if(len(method_name_line_tokens) > 0 and method_name_line_tokens[0] == 'Line'):
                    method_name = method_name_line_tokens[4].rstrip(':')
                    # process expression line
                    next_line = infile.readline()
                    tokens = next_line.split()
                    if(len(tokens) > 0 and tokens[0] == 'Output'):
                        expression_count += 1
                        # if the error expression is 0, add to non-approximable list
                        if(tokens[5] == '0'):
                            p.non_approximable_var.append((tokens[3].strip(), method_name))
                            all_variables.add(tokens[3])
                            p.all_var.append(tokens[3].strip())
                            continue
                        # read and sanitize expression
                        exp = next_line.split(' ', 5)[5].strip("\n")
                        exp = exp.replace(">> 0", "")
                        exp = exp.replace(">> ", "/2**")
                        exp = exp.replace("<< ", "*2**")

                        is_var_approximable = 0
                        # For each approximable input variable
                        for idx, var in enumerate(approximable_input):
                            # assign other variable errors to zero
                            for temp_var in approximable_input:
                                var_with_err_name = temp_var + "_err"
                                exec("%s = %f" % (var_with_err_name, 0.0))
                            # for repeat
                            result = []
                            for x in range(input_error_repeat):
                                # Generate a random error value in (0,1) for the concerned variable
                                var_with_err_name = var + "_err"
                                input_error = random.uniform(0.0, 1.0)
                                exec("%s = %f" % (var_with_err_name, input_error))
                                # Check if path condition with error is satisfied
                                if(eval(path_condition_with_error)):
                                    # If satisfied, get the output error from expression
                                    output_error = eval(exp)
                                    result.append((input_error, output_error))
                                    input_approximability_count[idx] += 1
                            if(len(result)):
                                # Check for monotonicity of output error. If not
                                # monotonous continue to evaluate other inputs.
                                result = sorted(result, key=lambda x: x[0])
                                monotonous_count = 0
                                for index, item in enumerate(result):
                                    if(index < (len(result) - 1) and item[1] <= result[index + 1][1]):
                                        monotonous_count += 1
                                # If at least 90% monotonous, get the linear regression gradient
                                if((monotonous_count / (len(result) - 1)) >= 0.8):
                                    list_x, list_y = zip(*result)
                                    # linear regression code from https://www.geeksforgeeks.org/linear-regression-python-implementation/
                                    xdata = np.array(list_x)
                                    ydata = np.array(list_y)
                                    n = np.size(xdata)
                                    m_x, m_y = np.mean(xdata), np.mean(ydata)
                                    SS_xy = np.sum(ydata * xdata) - n * m_y * m_x
                                    SS_xx = np.sum(xdata * xdata) - n * m_x * m_x
                                    b_1 = SS_xy / SS_xx
                                    # If gradient > 50% mark as non-approximable,
                                    # else continue for other variables in the expression
                                    if(b_1 <= 0.5):
                                        is_var_approximable = 1

                        # If for at least one variable in the expression, the output
                        # is approximable, then add to approximable list.
                        # Else add to the non-approximable list
                        all_variables.add(tokens[3].strip())
                        p.all_var.append(tokens[3].strip())
                        if(is_var_approximable):
                            p.approximable_var.append((tokens[3].strip(), method_name))
                        else:
                            p.non_approximable_var.append((tokens[3].strip(), method_name))
                    else:
                        continue

    approximability_result = []
    for var in all_variables:
        path_score = 0.0
        prob_score = 0.0
        number_of_paths_present_count = 0
        approximable_paths_count = 0
        for p in paths:
            # if variable appears in that path
            if(var in p.all_var):
                number_of_paths_present_count += 1
                # if in approximable list
                if(len(p.approximable_var) > 0):
                    approximable_var_in_path = list(zip(*p.approximable_var))[0]
                    if(var in approximable_var_in_path):
                        approximable_paths_count += 1
                        prob_score += p.path_prob
        path_score = approximable_paths_count * 100 / number_of_paths_present_count
        prob_score = prob_score * 100 / probability_sum
        approximability_result.append((var, path_score, prob_score))

    print("\nApproximability of program variables\n================================")
    print("var_name\tpathscore\tprobability score")
    for result in approximability_result:
        print("%s\t\t%.2f\t\t%e" % (result[0], result[1], result[2]))
    # for p in paths:
    #     print("%d %.2f" % (p.path_id, (p.path_prob * 100 / probability_sum)))

    print("\nApproximability of input variables\n================================")
    for idx, var in enumerate(approximable_input):
        print(var + ' : %d%%' % ((input_approximability_count[idx] / (expression_count * input_error_repeat)) * 100))

def collect_sources(ignore_func):
    top_path = path.Path(".")
    for py_path in top_path.walkfiles("*.py"):
        py_path = py_path.normpath()  # get rid of the leading '.'
        if not ignore_func(py_path):
            yield py_path

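# Hedged usage sketch; the ignore function below is hypothetical and not part of
# the original project. collect_sources yields every *.py file under the current
# directory that the supplied predicate does not reject.
def ignore_tests(py_path):
    # Skip anything whose basename mentions "test".
    return 'test' in py_path.name

for py_file in collect_sources(ignore_tests):
    print(py_file)
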
#!/usr/bin/env python3

from cffi import FFI
import json
import path

ffi_builder = FFI()

libs = ["../.build/lib/libchuck-norris.a"]
includes = ["../include"]
conan_info = json.loads(path.Path("../.build/conanbuildinfo.json").text())
for dep in conan_info["dependencies"]:
    for lib_name in dep["libs"]:
        lib_file = "lib{}.a".format(lib_name)
        for lib_path in dep["lib_paths"]:
            candidate = path.Path(lib_path).joinpath(lib_file)
            if candidate.exists():
                libs.append(candidate)
            else:
                libs.append(lib_name)
    for include_path in dep["include_paths"]:
        includes.append(include_path)

print(libs)
print(includes)

ffi_builder.set_source(
    "_chuck_fact",
    """

"""
Django settings for calculator project.

Generated by 'django-admin startproject' using Django 1.11.2.

For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""

import os

import path

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = path.Path(__file__).dirname().dirname().dirname()


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '(3xt92tw&nztwo1w1coe&9(kz$5=zl1bj824u5pe$*ax$qu-mc'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [

#!/usr/bin/env python

import sys

import path

import morefusion

here = path.Path(__file__).abspath().parent
sys.path.insert(0, here / "../ycb_video_checks")

from check_dataset import get_scene  # NOQA


if __name__ == "__main__":
    dataset = (
        morefusion.datasets.YCBVideoPoseCNNResultsRGBDPoseEstimationDataset()
    )  # NOQA
    morefusion.extra.trimesh.display_scenes(
        get_scene(dataset), height=int(320 * 0.7), width=int(480 * 0.7))

def _run_image(outfolder, image_data, cmd=None, user='******', make_tarball=False):
    # download image
    image_path = path.Path('img').realpath()
    print("Ensuring presence of " + image_data.base_image_url)
    ensure_image(
        image_data.base_image_name,
        image_data.base_image_url,
        IMAGES_ROOT,
        image_data.base_image_md5,
        untar_to=image_path,
    )

    lxc_file_path = write_lxc_config(image_path)
    lxc_name = 'build_image-' + image_data.new_image_name

    script_path = None
    if cmd is None:
        # copy bootstrap script into place and ensure it's executable.
        script = os.path.basename(image_data.script_url)
        script_path = image_path / script
        if os.path.exists(image_data.script_url):
            shutil.copy(image_data.script_url, script_path)
        else:
            ensure_file(image_data.script_url, script_path)
        script_path.chmod('a+x')
        real_cmd = '/' + script
    else:
        real_cmd = cmd

    # Call lxc-start, passing in our LXC config file and telling it to run
    # our build script inside the container.
    lxc_args = [
        'lxc-start',
        '--name', lxc_name,
        '--rcfile', lxc_file_path,
        '--',
        real_cmd,
    ]
    path_items = (
        '/usr/local/sbin',
        '/usr/local/bin',
        '/usr/sbin',
        '/usr/bin',
        '/sbin',
        '/bin',
        '/usr/games',
    )
    env = {
        'PATH': os.pathsep.join(path_items),
        'HOME': '/root',
    }
    env.update(image_data.env or {})
    if 'TERM' in os.environ:
        env['TERM'] = os.environ['TERM']
    logpath = os.path.join(outfolder, '%s.log' % image_data.new_image_name)
    print("LOGPATH", logpath)
    with open(logpath, 'w') as logfile:
        tee(lxc_args, env, logfile)

    # remove build script if we used one (a script is only written when no
    # explicit cmd was passed in).
    if cmd is None:
        os.remove(script_path)

    if make_tarball:
        img_name = '{image_data.new_image_name}.tar.gz'.format(**locals())
        tardest = os.path.join(outfolder, img_name)
        print("Compressing image to " + tardest)
        with tarfile.open(tardest, 'w:gz') as tar:
            tar.add(image_path, arcname='')

import path

a = path.Path(r'G:\python\BaiVeNha\log.txt', 'log.txt')
b = path.Path(r'G:\python\dcl\.gitignore', '.gitignore')
c = path.Path(r'G:\python\BaiVeNha\README.md', 'README.md')

a.write_to_file('path.json')
b.write_to_file('path.json')
c.write_to_file('path.json')

import path
import sh

for project in path.Path(".").dirs():
    with project:
        sh.git.clean(force=True)
        sh.git.reset(hard=True)
        sh.make()

logger = logging.getLogger('cx')
logger.info('Start to Load Biological Data')

CX_Neuropils = ['PB', 'EB', 'NO', 'no', 'BU', 'bu', 'LAL', 'lal']

neuropil_name_to_node = {}
for neuropil in CX_Neuropils:
    node = graph.Neuropils.create(name=neuropil, version=cx_version)
    neuropil_name_to_node[neuropil] = node

logger.info('Created Neuropils')

# File names grouped by neuropil in which neurons' presynaptic terminals
# arborize:
data = path.Path('neurons')

# LAL subregions list
LAL_subregions_list = ['RGT', 'RDG', 'RVG', 'RHB']
# lal subregions list
lal_subregions_list = ['LGT', 'LDG', 'LVG', 'LHB']

# NO subregions list
NO_subregions_list = [
    '(1,R)', '(2,RD)', '(2,RV)', '(3,RP)', '(3,RM)', '(3,RA)'
]
# no subregions list
no_subregions_list = [
    '(1,L)', '(2,LD)', '(2,LV)', '(3,LP)', '(3,LM)', '(3,LA)'
]

def main(args):
    vehicle_amount = 1
    if len(args) > 1:
        vehicle_amount = int(args[1])

    vehicle_ids = []
    for i in range(vehicle_amount):
        vehicle_ids.append('v{}'.format(i + 1))

    # PID parameters for path tracking.
    k_p = 0.5
    k_i = -0.02
    k_d = 3

    horizon = 15
    delta_t = 0.1
    Ad = numpy.matrix([[1., 0.], [delta_t, 1.]])
    Bd = numpy.matrix([[delta_t], [0.]])
    zeta = 0.5  # z = 1 -> full timegap tracking.
    Q_v = 1     # Part of Q matrix for velocity tracking.
    Q_s = 1     # Part of Q matrix for position tracking.
    Q = numpy.array([Q_v, 0, 0, Q_s]).reshape(2, 2)  # State tracking.
    R_acc = 0.1
    R = numpy.array([1]) * R_acc  # Input tracking.
    velocity_min = 0.
    velocity_max = 2.
    position_min = -100000.
    position_max = 1000000.
    acceleration_min = -1.5
    acceleration_max = 1.5
    truck_length = 0.3
    safety_distance = 0.2
    timegap = 1.
    delay = 0.0

    simulation_length = 40  # How many seconds to simulate.

    xmin = numpy.array([velocity_min, position_min])
    xmax = numpy.array([velocity_max, position_max])
    umin = numpy.array([acceleration_min])
    umax = numpy.array([acceleration_max])

    # Reference speed profile.
    opt_v_pts = 400             # How many points.
    opt_v_max = 1.2
    opt_v_min = 0.8
    opt_v_period_length = 60    # Period in meters.
    speed_ref = speed_profile.Speed()
    speed_ref.generate_sin(opt_v_min, opt_v_max, opt_v_period_length, opt_v_pts)
    speed_ref.repeating = True
    # vopt = speed_profile.Speed([1], [1])

    # Controller reference path.
    x_radius = 1.4
    y_radius = 1.2
    center = [0.2, -y_radius / 2]
    pts = 400

    variance = 0.

    plot_data = True
    save_data = True
    filename = 'measurements/sim_dmpc' + '_' + '_'.join(vehicle_ids) + '_'

    pt = path.Path()
    pt.gen_circle_path([x_radius, y_radius], points=pts, center=center)

    start_distance = 0.5
    path_len = pt.get_path_length()

    vehicles = []
    for i, vehicle_id in enumerate(vehicle_ids):
        theta = (len(vehicle_ids) - i - 1)*2*math.pi*start_distance/path_len + 0.1
        xcoord = center[0] + x_radius*math.cos(theta)
        ycoord = center[1] + y_radius*math.sin(theta)
        x = [xcoord, ycoord, theta + math.pi/2, 0]
        # x = [center[0], center[1] + y_radius, math.pi, 0]
        # x = [0, 0, math.pi, 0]

        vehicles.append(trxmodel.Trx(x=x, ID=vehicle_id))

    mpc = DistributedMPC(vehicles, pt, Ad, Bd, delta_t, horizon, zeta, Q, R,
                         truck_length, safety_distance, timegap,
                         k_p, k_i, k_d, simulation_length,
                         xmin=xmin, xmax=xmax, umin=umin, umax=umax,
                         speed_ref=speed_ref, delay=delay, variance=variance)

    mpc.run()

    if save_data:
        mpc.save_data_as_rosbag(filename)

    if plot_data:
        mpc.plot_stuff()

def main(args):
    # ID of the vehicle.
    if len(args) < 2:
        print('Need to enter at least one vehicle ID.')
        sys.exit()
    vehicle_id = args[1]    # First argument is the ID of the vehicle.
    if len(args) > 2:
        preceding_id = args[2]  # Second argument is, if entered, the ID of the preceding vehicle.
        is_leader = False
    else:
        preceding_id = 'None'   # If no second argument, the vehicle is the leader.
        is_leader = True

    # Topic name for subscribing to truck positions.
    position_topic_name = 'mocap_state'

    # Topic name for publishing vehicle commands.
    control_topic_name = 'pwm_commands'

    # Name for starting and stopping recording.
    recording_service_name = vehicle_id + '/dmpc/set_measurement'

    # Filename prefix for recording data.
    recording_filename = 'dmpc' + '_' + vehicle_id + '_'

    # PID parameters for path tracking.
    k_p = 0.5
    k_i = 0
    k_d = 3

    # MPC information.
    horizon = 20
    delta_t = 0.1
    Ad = numpy.matrix([[1., 0.], [delta_t, 1.]])
    Bd = numpy.matrix([[delta_t], [0.]])
    zeta = 0.90
    s0 = 0.
    v0 = 0.
    Q_v = 1     # Part of Q matrix for velocity tracking.
    Q_s = 0.5   # Part of Q matrix for position tracking.
    Q = numpy.array([Q_v, 0, 0, Q_s]).reshape(2, 2)  # State tracking.
    R_acc = 0.1
    R = numpy.array([1]) * R_acc  # Input tracking.
    velocity_min = 0.
    velocity_max = 2.
    position_min = -100000.
    position_max = 1000000.
    acceleration_min = -0.5
    acceleration_max = 0.5
    truck_length = 0.2
    safety_distance = 0.1
    timegap = 1.

    delay = 1.

    x0 = numpy.array([s0, v0])
    xmin = numpy.array([velocity_min, position_min])
    xmax = numpy.array([velocity_max, position_max])
    umin = numpy.array([acceleration_min])
    umax = numpy.array([acceleration_max])

    # Reference speed profile.
    opt_v_pts = 1000            # How many points.
    opt_v_max = 1.2
    opt_v_min = 0.8
    opt_v_period_length = 60    # Period in meters.
    vopt = speed_profile.Speed()
    vopt.generate_sin(opt_v_min, opt_v_max, opt_v_period_length, opt_v_pts)
    vopt.repeating = True

    # Controller reference path.
    x_radius = 1.4
    y_radius = 1.2
    center = [0.2, -y_radius/2]
    pts = 400

    pt = path.Path()
    pt.gen_circle_path([x_radius, y_radius], points=pts, center=center)

    # Initialize controller.
    controller = Controller(
        position_topic_name, control_topic_name, vehicle_id, preceding_id,
        is_leader, pt, Ad, Bd, delta_t, horizon, zeta, Q, R, truck_length,
        safety_distance, timegap,
        speed_ref=vopt, xmin=xmin, xmax=xmax, umin=umin, umax=umax, x0=x0,
        k_p=k_p, k_i=k_i, k_d=k_d,
        recording_service_name=recording_service_name,
        recording_filename=recording_filename,
        delay=delay
    )

    # Start controller.
    controller.run()

import path, mistune, flask

a, d = flask.Flask(""), path.Path("p")


@a.route("/<p>")
def s(p):
    f = d / p
    k = flask.request.args.get('n', f.text())
    f.write_text(k)
    b = [f"[{l[2:]}](/{l[2:]})\n" for l in d.files() if "/" + p in l.text()]
    return mistune.markdown(f"#{p}\n{k}\n\n" + ''.join(b)) + \
        f"<form><textarea name=n>{k}</textarea>\n<button>"

np.random.seed(args.seed)
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
os.environ['PYTHONHASHSEED'] = str(args.seed)

use_norm = 'use-norm' if args.use_norm else 'no-norm'
add_self_loop = 'add-self-loop' if args.add_self_loop else 'no-self-loop'

#### configure output directory
dataname = f'{args.data}_{args.dataset}'
model_name = args.model_name
nlayer = args.nlayer
dirname = f'{datetime.datetime.now()}'.replace(' ', '_').replace(':', '.')

out_dir = path.Path(
    f'./{args.out_dir}/{model_name}_{nlayer}_{dataname}/seed_{args.seed}')

if out_dir.exists():
    shutil.rmtree(out_dir)
out_dir.makedirs_p()

### configure logger
from logger import get_logger

baselogger = get_logger('base logger', f'{out_dir}/logging.log', not args.nostdout)
resultlogger = get_logger('result logger', f'{out_dir}/result.log', not args.nostdout)
baselogger.info(args)

# load data

from vision.learner import DashVisionLearner
import json
import path

path = path.Path('./')

with open('data/response.json') as f:
    response = json.load(f)

learn = DashVisionLearner.create_vision_learner(response)
print('Created learner!')

print('Now training...')
learn.fit_one_cycle(1)
print('Done!')

import pandas as pd
import numpy as np
from sklearn.metrics import f1_score
import matplotlib.pyplot as plt
from sklearn.model_selection import cross_validate, KFold
import path
from typing import Callable

DATA_DIR = path.Path("../data/")
ARTIFACT_DIR = path.Path("../artifacts/")


def evaluate(y_test, pred):
    return f1_score(y_test, pred)  # average='macro')


def helper_cross_validate(X: pd.DataFrame, y: pd.Series, train_model: Callable):
    kf = KFold(n_splits=10)
    score_list = []
    for train_index, test_index in kf.split(X):
        X_train, X_test = X.iloc[train_index], X.iloc[test_index]
        y_train, y_test = y.iloc[train_index], y.iloc[test_index]
        # if pass_val:
        #     model = train_model(model, X_train, y_train, X_test, y_test)
        # else:
        #     model = train_model(model, X_train, y_train)
        model = train_model(X_train, y_train, X_test, y_test)
        pred = model.predict(X_test)
        score = evaluate(y_test, pred)

from path import Path
import path
import collections
import os, sys
# import pickle
import pickle as dill
# import pi
# import dill

PY3 = sys.version.startswith('3.')
if PY3:
    from importlib import reload
else:
    pass

__file__ = os.path.realpath(__file__)
CDIR = path.Path(__file__).realpath().dirname()


class SharedObject(object):
    DIR = Path('build').makedirs_p().realpath()


def readInput(x):
    with open((x), 'r') as f:
        return f.read()


def middleStep(x):
    return readInput(x)