def cmd(self, command_line):
    """
    command_line -- all of the keywords passed in the command string, parsed
    """
    machine_name = check_machine_name(command_line)
    # Bare "edit <machine>" or "edit <machine> config" both mean machine edit.
    if len(command_line) == 3 or command_line[3] == "config":
        rewritten = CommandLine.process_input("edit machine %s" % machine_name)
        return Edit.Machine().cmd(rewritten)
    command = command_line[3]
    if len(command_line) != 5:
        raise CommandError("Incomplete command; need to include %s name." % command)
    object_name = command_line[4]
    # Dispatch table keyed on the sub-command keyword.
    handlers = {"bom": Edit.Bom, "include": Edit.Include}
    if command in handlers:
        rewritten = CommandLine.process_input("edit %s %s" % (command, object_name))
        return handlers[command]().cmd(rewritten)
    raise CommandError("Unknown command: %s" % command)
def getEarthObservationImage(image_directory, lat, lon, dim, date, direct_method):
    """Download a NASA Earth observation image via curl into image_directory.

    image_directory -- directory the .png is saved into (process cwd is changed!)
    lat, lon -- latitude / longitude of the image centre
    dim -- width and height of the image in degrees
    date -- image date, formatted YYYY-MM-DD
    direct_method -- True: build the URL from the base URL + complement;
                     False: resolve it via getEarthObservationImageUrl
    """
    # TODO: Add explanation to the difference between the methods (direct/indirect).
    log.debug("Retrieving Earth observation information and images")
    log.info("The selected directory is - {}".format(image_directory))
    log.info("Selected latitude is - {}".format(lat))
    log.info("Selected longitude is - {}".format(lon))
    log.info("Selected dimension (width and height of image in degrees) is - {}".format(dim))
    log.info("Selected date is - {}".format(date))
    log.warning("Format for date is - YYYY-MM-DD")
    log.debug("Will use direct method - {}".format(direct_method))
    if direct_method is False:
        url = getEarthObservationImageUrl(lat=lat, lon=lon, dim=dim, date=date)
    else:
        url = EARTH_OBSERVATION_IMAGERY_BASE_URL + formatUrlComplement(lat=lat, lon=lon, dim=dim, date=date)
    log.debug("Changing command line working directory to given directory")
    os.chdir(image_directory)
    log.info("Images will be saved as .png files")
    log.info("Image URL is - {}".format(url))
    # BUG FIX: the filename literal previously ended in ".png " (trailing
    # space), so curl created files with a space embedded in their names.
    filename = "EARTH_{}_{}_{}_{}.png".format(lat, lon, dim, date)
    CommandLine.runCmd(['curl', '-o', filename, url])
    log.info("For full API documentation - https://api.nasa.gov/")
def getAstronomyPictureOfTheDay(image_directory, date, hd):
    """Download the APOD image for *date* into *image_directory* as a .JPG.

    hd selects the high-definition variant; date must be YYYY-MM-DD.
    Note: changes the process working directory to image_directory.
    """
    log.debug("Retrieving APOD (Astronomy Picture Of the Day) image")
    log.debug("The selected directory is - {}".format(image_directory))
    log.debug("Selected date is - {}".format(date))
    log.warning("Date format has to be YYYY-MM-DD of an existing date")
    log.debug("HD version of the image - {}".format(hd))
    image_url = getAstronomyPictureOfTheDayUrl(date, hd)
    log.debug("Changing command line working directory to given directory")
    os.chdir(image_directory)
    log.debug("Images will be saved as .JPG files")
    log.debug("Image URL is - {}".format(image_url))
    output_name = "APOD_" + date + ".JPG"
    wget_command = ["wget", "-O", output_name, image_url]
    CommandLine.runCmd(wget_command)
    log.info("For full API documentation - https://api.nasa.gov/")
def main(argv):
    """Load a pickled pose set and open the point-cloud viewer on it.

    argv[0] -- path to the pickle file; it must contain at least the keys
               'camnames', 'R' and 't' (shown by the reads below).
    Blocks in rospy.spin() until interrupted, then stops the shared state.
    """
    poses = FileIO.getFromPickle(argv[0])
    state = State.State()
    # Directory part of the pickle path becomes the working path for state.
    state.path = argv[0][0:argv[0].rfind('/')]
    print(state.path)
    state.camNames = poses['camnames']
    state.R = poses['R']
    state.t = poses['t']
    print(poses)
    print("YEET")
    # Viewer window sized 480x640; constructor return value is unused here.
    PointCloudVisualizer.PCViewer(poses, argv[0], (480, 640), state)
    #sets thread for terminal window
    CommandLine.Start(state, CommandsImporterPC.CommandsImporterPC)
    #sets thread for pipeline
    #t1 = threading.Thread(target=worker,args=( state,))
    #t1.start()
    try:
        print("SPINN")
        # Block handing control to ROS until shutdown or Ctrl-C.
        rospy.spin()
    except KeyboardInterrupt:
        print("shut")
    state.Stop()
def test_ProperOutput(self):
    """Checks if output is as expected."""
    sample = "SampleNumber=3234 provider=Dr. M. Welby patient=John Smith priority=High"
    expected = "12=4 8=12 7=10 8=4"
    self.assertEqual(CommandLine.CommandLine(sample), expected)
def getNasaEpicImage(image_directory):
    """Download every current NASA EPIC image into *image_directory* as .png.

    Note: changes the process working directory to image_directory.
    """
    log.debug(
        "Retrieving Nasa EPIC (Earth Polychromatic Imaging Camera) images")
    log.info("The selected directory is - {}".format(image_directory))
    url_list = getNasaEpicImagesUrl()
    log.debug("Changing command line working directory to given directory")
    os.chdir(image_directory)
    log.info("Images will be saved as .png files")
    # enumerate replaces the manual i = 0 / i = i + 1 counter.
    for i, url in enumerate(url_list):
        log.debug("Current image number is - {}".format(i + 1))
        log.info("Current image URL is - {}".format(url))
        # BUG FIX: the filename literal previously ended in ".png "
        # (trailing space), creating files with a space in their names.
        CommandLine.runCmd(['curl', '-o', "EPIC_" + str(i) + ".png", url])
    log.info(
        "For full API documentation - https://epic.gsfc.nasa.gov/about/api")
class ShowRecord(CommandLine.Application):
    """Command-line application that displays records from a BeeDict dictionary."""

    # Text shown by the CommandLine framework as the tool banner.
    header = 'Tool for displaying BeeDict records'
    # Usage line; %s is filled in with the program name by the framework.
    synopsis = '%s [options] dictname keys...'
    # -d takes a value (display depth, default 3); -a is a boolean switch.
    options = [
        CommandLine.ArgumentOption('-d', 'Display depth', 3),
        CommandLine.SwitchOption('-a', 'Show all records')
    ]

    def check_files(self, files):
        """Validate positional arguments; exit with help text when none given."""
        if len(files) < 1:
            self.help('Missing arguments')
            sys.exit(1)

    def main(self):
        """Entry point: first file is the dict name, the rest are record keys."""
        show(self.files[0], self.files[1:], self.values['-a'],
             self.values['-d'])
def getMarsRoverImages(image_directory, rover, sol, date):
    """Download Mars rover photos via wget into *image_directory* as .JPG.

    image_directory -- destination directory (process cwd is changed!)
    rover -- one of Spirit/Opportunity/Curiosity
    sol -- the selected sol (Martian day)
    date -- the selected date format / Earth date value passed to the URL builder
    """
    log.debug("Retrieving Mars rover images")
    log.info("The options for the Mars rovers are - Spirit/Opportunity/Curiosity")
    log.info("The selected directory is - {}".format(image_directory))
    log.info("The selected rover is - {}".format(rover))
    # BUG FIX: the two messages below previously had their arguments swapped
    # (sol was logged as the date and vice versa). The values passed on to
    # getMarsRoverImagesUrl are unchanged.
    log.info("The selected sol (day) is - {}".format(sol))
    log.info("The selected date format (Earth/Mars dates) is - {}".format(date))
    photos_url = getMarsRoverImagesUrl(getMarsRoverManifest(rover), sol, date)
    log.debug("Changing command line working directory to given directory")
    os.chdir(image_directory)
    log.info("Images will be saved as .JPG files")
    for i, url in enumerate(photos_url):
        log.debug("Current image number is - {}".format(i + 1))
        log.info("Current image URL is - {}".format(url))
        CommandLine.runCmd(["wget", "-O", "MARS_" + str(i) + ".JPG", url])
    log.info("For full API documentation - https://api.nasa.gov/")
def getNasaLibraryImages(image_directory, q, mediaType, startYear, endYear):
    """Query the NASA image library and download each result as a .JPG.

    q -- free-text query; mediaType filters the result kind;
    startYear/endYear bound the search window.
    Note: changes the process working directory to image_directory.
    """
    log.debug("Retrieving NASA library images using a query")
    log.info("Provided query is - {}".format(q))
    log.info("Selected media type is - {}".format(mediaType))
    log.info("Selected start/end years are - {} - {}".format(
        startYear, endYear))
    photos_url = getNasaLibraryDataUrl(q, mediaType, startYear, endYear)
    log.debug("Changing command line working directory to given directory")
    os.chdir(image_directory)
    log.info("Images will be saved as .JPG files")
    for image_index, image_url in enumerate(photos_url):
        log.debug("Current image number is - {}".format(image_index + 1))
        log.info("Current image URL is - {}".format(image_url))
        target_name = "NASA_" + str(image_index) + ".JPG"
        CommandLine.runCmd(["wget", "-O", target_name, image_url])
    log.info(
        "For full API documentation - https://images.nasa.gov/docs/images.nasa.gov_api_docs.pdf"
    )
def convListToDict(list):
    """Convert a flat [k1, v1, k2, v2, ...] list into a dict.

    Raises CL.ArgError when the list has an odd number of elements.
    (The parameter name shadows the builtin but is kept for caller
    compatibility.)
    """
    if len(list) % 2 != 0:
        raise CL.ArgError("Invalid input list " + str(list))
    # BUG FIX: the previous implementation consumed the caller's list with
    # pop(0) (mutating it, and O(n^2)); pair up alternating slices instead.
    return dict(zip(list[0::2], list[1::2]))
def getAllDependenciesWithPaths(self, key):
    """Collect dependencies for *key* from the top module and all modules.

    Top-module entries are returned prefixed with the top module's build
    path; per-module entries come from getModuleDependenciesWithPaths.
    Writes a warning to stderr when nothing is found and the build-pipeline
    debug level is above 1.
    """
    # we must check to see if the dependencies actually exist.
    # generally we have to make sure to remove duplicates
    allDeps = []
    # IDIOM FIX: dict.has_key() is deprecated (removed in Python 3);
    # the `in` operator is equivalent on both Python 2 and 3.
    if key in self.topModule.moduleDependency:
        for dep in self.topModule.moduleDependency[key]:
            # NOTE(review): the duplicate check compares the raw dep while
            # the list stores prefixed paths, so it never matches — kept
            # as-is to preserve behavior; confirm intent with the author.
            if dep not in allDeps:
                allDeps.append(self.topModule.buildPath + "/" + dep)
    for module in self.moduleList:
        allDeps += self.getModuleDependenciesWithPaths(module, key)
    if len(allDeps) == 0 and CommandLine.getBuildPipelineDebug(self) > 1:
        sys.stderr.write("Warning: no dependencies were found")
    return allDeps
def getAllDependenciesWithPaths(self, key):
    """Collect dependencies for *key* from the top module and all modules.

    Top-module entries are returned prefixed with the top module's build
    path; per-module entries come from getModuleDependenciesWithPaths.
    Writes a warning to stderr when nothing is found and the build-pipeline
    debug level is above 1.
    """
    # we must check to see if the dependencies actually exist.
    # generally we have to make sure to remove duplicates
    allDeps = []
    # IDIOM FIX: dict.has_key() is deprecated (removed in Python 3);
    # the `in` operator is equivalent on both Python 2 and 3.
    if key in self.topModule.moduleDependency:
        for dep in self.topModule.moduleDependency[key]:
            # NOTE(review): the duplicate check compares the raw dep while
            # the list stores prefixed paths, so it never matches — kept
            # as-is to preserve behavior; confirm intent with the author.
            if dep not in allDeps:
                allDeps.append(self.topModule.buildPath + '/' + dep)
    for module in self.moduleList:
        allDeps += self.getModuleDependenciesWithPaths(module, key)
    if len(allDeps) == 0 and CommandLine.getBuildPipelineDebug(self) > 1:
        sys.stderr.write("Warning: no dependencies were found")
    return allDeps
def getSynthBoundaryDependencies(self, module, key):
    """Collect *key* dependencies from all synth-boundary descendents of *module*.

    String dependencies are appended singly; non-string (iterable)
    dependencies are spliced in. Writes a warning to stderr when nothing is
    found and the build-pipeline debug level is above 1.
    """
    # we must check to see if the dependencies actually exist.
    allDesc = self.getSynthBoundaryDescendents(module)
    # grab my deps
    # use hash to reduce memory usage
    allDeps = []
    for desc in allDesc:
        # IDIOM FIX: dict.has_key() is deprecated (removed in Python 3);
        # the `in` operator is equivalent on both Python 2 and 3.
        if key in desc.moduleDependency:
            for dep in desc.moduleDependency[key]:
                if dep not in allDeps:
                    allDeps.extend([dep] if isinstance(dep, str) else dep)
    if len(allDeps) == 0 and CommandLine.getBuildPipelineDebug(self) > 1:
        sys.stderr.write("Warning: no dependencies were found")
    return allDeps
def getSynthBoundaryDependencies(self, module, key):
    """Collect *key* dependencies from all synth-boundary descendents of *module*.

    String dependencies are appended singly; non-string (iterable)
    dependencies are spliced in. Writes a warning to stderr when nothing is
    found and the build-pipeline debug level is above 1.
    """
    # we must check to see if the dependencies actually exist.
    allDesc = self.getSynthBoundaryDescendents(module)
    # grab my deps
    # use hash to reduce memory usage
    allDeps = []
    for desc in allDesc:
        # IDIOM FIX: dict.has_key() is deprecated (removed in Python 3);
        # the `in` operator is equivalent on both Python 2 and 3.
        if key in desc.moduleDependency:
            for dep in desc.moduleDependency[key]:
                if dep not in allDeps:
                    allDeps.extend([dep] if isinstance(dep, str) else dep)
    if len(allDeps) == 0 and CommandLine.getBuildPipelineDebug(self) > 1:
        sys.stderr.write("Warning: no dependencies were found")
    return allDeps
def getAllDependencies(self, key):
    """Collect *key* dependencies from the top module and every module.

    List-valued dependencies are spliced in; scalars are appended. The
    result is de-duplicated and converted to strings via
    ProjectDependency.convertDependencies. Writes a warning to stderr when
    nothing is found and the build-pipeline debug level is above 1.
    """
    # we must check to see if the dependencies actually exist.
    # generally we have to make sure to remove duplicates
    allDeps = []
    # IDIOM FIX: dict.has_key() is deprecated (removed in Python 3);
    # the `in` operator is equivalent on both Python 2 and 3.
    if key in self.topModule.moduleDependency:
        for dep in self.topModule.moduleDependency[key]:
            if dep not in allDeps:
                allDeps.extend(dep if isinstance(dep, list) else [dep])
    for module in self.moduleList:
        if key in module.moduleDependency:
            for dep in module.moduleDependency[key]:
                if dep not in allDeps:
                    allDeps.extend(dep if isinstance(dep, list) else [dep])
    if len(allDeps) == 0 and CommandLine.getBuildPipelineDebug(self) > 1:
        sys.stderr.write("Warning: no dependencies were found")
    # Return a list of unique entries, in the process converting SCons
    # dependence entries to strings.
    return list(set(ProjectDependency.convertDependencies(allDeps)))
def getAllDependencies(self, key):
    """Collect *key* dependencies from the top module and every module.

    List-valued dependencies are spliced in; scalars are appended. The
    result is de-duplicated and converted to strings via
    ProjectDependency.convertDependencies. Writes a warning to stderr when
    nothing is found and the build-pipeline debug level is above 1.
    """
    # we must check to see if the dependencies actually exist.
    # generally we have to make sure to remove duplicates
    allDeps = []
    # IDIOM FIX: dict.has_key() is deprecated (removed in Python 3);
    # the `in` operator is equivalent on both Python 2 and 3.
    if key in self.topModule.moduleDependency:
        for dep in self.topModule.moduleDependency[key]:
            if dep not in allDeps:
                allDeps.extend(dep if isinstance(dep, list) else [dep])
    for module in self.moduleList:
        if key in module.moduleDependency:
            for dep in module.moduleDependency[key]:
                if dep not in allDeps:
                    allDeps.extend(dep if isinstance(dep, list) else [dep])
    if len(allDeps) == 0 and CommandLine.getBuildPipelineDebug(self) > 1:
        sys.stderr.write("Warning: no dependencies were found")
    # Return a list of unique entries, in the process converting SCons
    # dependence entries to strings.
    return list(set(ProjectDependency.convertDependencies(allDeps)))
def complete(self, _text, status):
    """Command line completer, called with [tab] or [?].

    _text -- text that readline sends as what the user enters (unused here;
             the line is re-read from readline.get_line_buffer())
    status -- the index into the completion list; readline calls this
              repeatedly with status 0, 1, 2, ... to enumerate matches
    Returns a completion string, a list, or None depending on readline's
    protocol state.
    """
    try:
        if status > 0:
            # Subsequent calls: walk the match list computed on status == 0.
            if status >= len(self.names):
                return None
            return self.names[status]
        else:
            # First call: parse the current input line and build the matches.
            # _no_flag, help_flag, tokens, _comment = \
            command_line = CommandLine.process_input(readline.get_line_buffer())
            command_line.update_system_tokens()
            if command_line.help_flag:  # Process the [?] key first
                self.find_help(command_line, 0)
                # Redraw the prompt and the user's partial input after help.
                sys.stdout.write("%s%s" % (system_state.get_prompt(), readline.get_line_buffer()))
                sys.stdout.flush()
                return []
            names, token_delimeter = self.get_names_and_token_delimeter(command_line)
            self.names = names
            if not self.names:
                return []
            if len(self.names) == 1:
                # Single match: complete it fully, appending the delimiter.
                return self.names[0] + token_delimeter
            if self.names:
                return self.names[0]
            return []
    except StandardError, err:
        # NOTE(review): `file` here is the Python 2 builtin type, not a
        # filename — the message likely meant to name the current module;
        # confirm intent before relying on it.
        sys.stderr.write(" %%%% Error detected in %s (%s)." % (file, err))
        # Capture the traceback into a string buffer for prefixed re-emission.
        tb_str = StringIO.StringIO()
        traceback.print_exc(file=tb_str)
        tb_str.seek(0)
        data = tb_str.read()
        ermsg = ""
        for line in data.split("\n"):
            ermsg += "\n||>>>%s" % line
        sys.stderr.write(ermsg)
        sys.stderr.write(" %%%% Error ocurred in %s" % file)
        print
        return []
def process_command(self, command_string): "When somebody hits return in the shell, this method handles it." if command_string == "exit": raise EOFError try: # no_flag, help_flag, tokens, comment = libUi.process_input(command_string) # FIXME: nobody cares about the comment command_line = CommandLine.process_input(command_string) if command_line.is_empty(): # somebody just pressed return for no reason return OK, [] command_line.update_system_tokens() status, output = self.run(command_line) libUi.user_output(output, status) return status, output except ServerException, err: if err.http_code == 403: msg = ["You are not authorized for this command."] libUi.user_output(msg, FAIL) return FAIL, msg output = ["ERROR: Cannot communicate with CNM Server"] output.append(str(err)) libUi.user_output(output, FAIL)
def cmd(self, command_line):
    """
    command_line -- all of the keywords passed in the command string, parsed
    """
    machine_name = check_machine_name(command_line)
    # Bare "show <machine>" or "show <machine> config" mean machine display.
    if len(command_line) == 3 or command_line[3] == "config":
        rewritten = CommandLine.process_input("show machine %s" % machine_name)
        return Show.Machine().cmd(rewritten)
    command = command_line[3]
    # Sub-commands that operate on the machine itself.
    machine_handlers = {
        "merged": Show.Merged,
        "status": Show.Status,
        "summary": Show.Summary,
    }
    if command in machine_handlers:
        rewritten = CommandLine.process_input("show %s %s" % (command, machine_name))
        return machine_handlers[command]().cmd(rewritten)
    if len(command_line) != 5:
        raise CommandError("Incomplete command; need to include %s name." % command)
    object_name = command_line[4]
    # Sub-commands that take an extra object name.
    object_handlers = {"bom": Show.Bom, "include": Show.Include}
    if command in object_handlers:
        rewritten = CommandLine.process_input("show %s %s" % (command, object_name))
        return object_handlers[command]().cmd(rewritten)
    raise CommandError("Unknown command: %s" % command)
def __init__(self, env, modulePickle, arguments, cmdLineTgts):
    """Build the module-list wrapper around an SCons environment.

    env -- SCons environment; its DEFS dictionary supplies all paths/flags
    modulePickle -- unused in the visible portion of this constructor
    arguments -- SCons ARGUMENTS, stored for later queries
    cmdLineTgts -- command-line build targets, stored for later queries
    Side effects: replaces env.Command with a de-sugaring wrapper and
    creates the smart-guide cache directory on disk.
    """
    # do a pattern match on the synth boundary paths, which we need to build
    # the module structure
    self.env = env
    CommandBaseline = env.Command

    # override certain scons build functions to de-sugar
    # build-pipeline's object types.
    def CommandOverride(tgts, srcs, cmds):
        # did we get a flat object?
        srcsSugared = srcs
        if (not isinstance(srcs, list)):
            srcsSugared = [srcs]
        modifiedSrcs = []
        #might want convert dependencies here.
        for src in srcsSugared:
            # Source.Source objects are stringified before SCons sees them.
            if (isinstance(src, Source.Source)):
                modifiedSrcs.append(str(src))
            else:
                modifiedSrcs.append(src)
        return CommandBaseline(tgts, modifiedSrcs, cmds)

    self.env.Command = CommandOverride
    self.arguments = arguments
    self.cmdLineTgts = cmdLineTgts
    self.buildDirectory = env['DEFS']['BUILD_DIR']
    self.compileDirectory = env['DEFS']['TMP_FPGA_DIR']
    # Space-separated source lists from the build definitions.
    givenVerilogs = Utils.clean_split(env['DEFS']['GIVEN_VERILOGS'], sep=' ')
    givenVerilogPkgs = Utils.clean_split(env['DEFS']['GIVEN_VERILOG_PKGS'], sep=' ')
    givenVerilogHs = Utils.clean_split(env['DEFS']['GIVEN_VERILOG_HS'], sep=' ')
    givenNGCs = Utils.clean_split(env['DEFS']['GIVEN_NGCS'], sep=' ')
    givenVHDs = Utils.clean_split(env['DEFS']['GIVEN_VHDS'], sep=' ')
    self.apmName = env['DEFS']['APM_NAME']
    self.apmFile = env['DEFS']['APM_FILE']
    self.moduleList = []
    self.modules = {}  # Convenient dictionary
    self.awbParamsObj = AWBParams.AWBParams(self)
    # True when the only requested target is the dependency bootstrap.
    self.isDependsBuild = (CommandLine.getCommandLineTargets(self) == [
        'depends-init'
    ])
    #We should be invoking this elsewhere?
    #self.wrapper_v = env.SConscript([env['DEFS']['ROOT_DIR_HW_MODEL'] + '/SConscript'])
    # this really doesn't belong here.
    # Software executable target: real path when C sources were given,
    # otherwise the SCons '$TARGET' placeholder.
    if env['DEFS']['GIVEN_CS'] != '':
        SW_EXE_OR_TARGET = env['DEFS'][
            'ROOT_DIR_SW'] + '/obj/' + self.apmName + '_sw.exe'
        SW_EXE = [SW_EXE_OR_TARGET]
    else:
        SW_EXE_OR_TARGET = '$TARGET'
        SW_EXE = []
    self.swExeOrTarget = SW_EXE_OR_TARGET
    self.swExe = SW_EXE
    self.swIncDir = Utils.clean_split(env['DEFS']['SW_INC_DIRS'], sep=' ')
    self.swLibs = Utils.clean_split(env['DEFS']['SW_LIBS'], sep=' ')
    self.swLinkLibs = Utils.clean_split(env['DEFS']['SW_LINK_LIBS'], sep=' ')
    self.m5BuildDir = env['DEFS']['M5_BUILD_DIR']
    # Each given ELF is passed to the tools as its own ' -bd ' option.
    if len(env['DEFS']['GIVEN_ELFS']) != 0:
        elf = ' -bd ' + str.join(
            ' -bd ', Utils.clean_split(env['DEFS']['GIVEN_ELFS'], sep=' '))
    else:
        elf = ''
    self.elf = elf

    #
    # Use a cached post par ncd to guide map and par?  This is off by default since
    # the smart guide option can make place & route fail when it otherwise would have
    # worked.  It doesn't always improve run time, either.  To turn on smart guide
    # either define the environment variable USE_SMARTGUIDE or set
    # USE_SMARTGUIDE on the scons command line to a non-zero value.
    #
    self.smartguide_cache_dir = env['DEFS'][
        'WORKSPACE_ROOT'] + '/var/xilinx_ncd'
    self.smartguide_cache_file = self.apmName + '_par.ncd'
    # Create the cache directory; an already-existing directory is fine.
    try:
        os.mkdir(self.smartguide_cache_dir)
    except OSError, e:
        if e.errno == errno.EEXIST:
            pass
def getDependencies(self, module, key):
    """Return *module*'s dependencies for *key*.

    Emits a stderr warning when the list is empty and the build-pipeline
    debug level is above 1.
    """
    deps = module.getDependencies(key)
    if len(deps) == 0 and CommandLine.getBuildPipelineDebug(self) > 1:
        sys.stderr.write("Warning: no dependencies were found")
    return deps
def main(argv):
    """Run the pose-estimation pipeline described by a JSON config file.

    argv[0] -- path to the pipeline JSON. The 'input' section selects the
    image stream (IMG/ROS/ROS_GATHER/SYNTH); the 'model' section selects the
    observation maker and pose calculator. Results (R, t) are visualized and
    pickled into the run's log folder.
    """
    if len(argv) == 0:
        raise Exception('Please specify a Pipeline File')
    #Reads the configuration file
    data = FileIO.getJsonFromFile(argv[0])
    posepipeline = PosePipeline.PosePipeline()
    #holds
    state = {}
    posepipeline.folder = FileIO.CreateFolder("./PipelineLogs/" + FileIO.GetAnimalName())
    #saves pipeline configuration on the outputfolder
    FileIO.putFileWithJson(data, "pipeline", posepipeline.folder + "/")
    #hash of aruco detector classes
    arucodetectors = {
        'singular': SingleArucosDetector.SingleArucosDetector,
        'allforone': CangalhoPnPDetector.CangalhoPnPDetector,
        'depthforone': CangalhoProcrustesDetector.CangalhoProcrustesDetector
    }
    #Assigns the InputStream
    if data['input']['type'] == 'IMG':
        #must set path where images are
        posepipeline.imgStream = ImgStreamReader.ImgStreamReader(
            data['input']['path'])
    elif data['input']['type'] == 'ROS':
        camNames = []
        #sets cameras if there are any
        if "cameras" in data['model']:
            camNames = data['model']['cameras']
        posepipeline.imgStream = RosStreamReader.RosStreamReader(
            camNames=camNames, inputData=data['input'])
        #setting stuff on state
        state['intrinsics'] = FileIO.getIntrinsics(
            posepipeline.imgStream.camNames)
        state['arucodata'] = FileIO.getJsonFromFile(data['model']['arucodata'])
        state['arucomodel'] = FileIO.getFromPickle(data['model']['arucomodel'])
    elif data['input']['type'] == 'ROS_GATHER':
        print("ROS GATHER MODE")
        camNames = []
        #sets cameras if there are any
        if "cameras" in data['model']:
            camNames = data['model']['cameras']
        posepipeline.imgStream = RosGatherStreamReader.RosGatherStreamReader(
            camNames=camNames, inputData=data['input'])
        #setting stuff on state
        state['intrinsics'] = FileIO.getIntrinsics(
            posepipeline.imgStream.camNames)
        state['arucodata'] = FileIO.getJsonFromFile(data['model']['arucodata'])
        state['arucomodel'] = FileIO.getFromPickle(data['model']['arucomodel'])
        posepipeline.pcpublisher = PC2Publisher.PC2Publisher(
            state['intrinsics'], posepipeline.imgStream.camNames)
    elif data['input']['type'] == 'SYNTH':
        posepipeline.imgStream = StreamReader.StreamReader()
        state['synthmodel'] = FileIO.getFromPickle(data['model']['model'])
        if data['model']['type'] == "SYNTH_CAMERA":
            state['modelscene'] = FileIO.getFromPickle(
                data['model']['modelscene'])
            print(state['modelscene'])
        #print(state['synthmodel'][0])
        #print(state['synthmodel'][1])
        #visu.ViewRefs(state['synthmodel'][0],state['synthmodel'][1],refSize=1,showRef=True,saveImg=True,saveName=posepipeline.folder+"/screenshot.jpg")
        posepipeline.posescalculator = PosesCalculatorSynth.PosesCalculatorSynth(
            {"N_objects": len(state['synthmodel'][0])})
    else:
        print("This Pipeline input is invalid")
    #Assigns observation maker and posecalculator
    if data['model']['type'] == 'CANGALHO':
        #static parameters
        # BUG FIX: these two lookups previously referenced the bare name
        # `imgStream`, which is undefined here (NameError at runtime);
        # the stream lives on the pipeline object.
        singlecamData = {
            "K": state['intrinsics']['K'][posepipeline.imgStream.camNames[0]],
            "D": state['intrinsics']['D'][posepipeline.imgStream.camNames[0]],
            "arucodata": state['arucodata']
        }
        #sets observation maker
        posepipeline.ObservationMaker = CangalhoObservationsMaker.CangalhoObservationMaker(
            singlecamData)
        #sets pose calculator
        posedata = {
            "N_objects": len(state['arucodata']['ids']),
            "record": data["model"]["record"]
        }
        posepipeline.posescalculator = PosesCalculator.PosesCalculator(
            posedata)
    elif data['model']['type'] == 'CAMERA':
        #static parameters
        multicamData = {
            "intrinsics": state['intrinsics'],
            "arucodata": state['arucodata'],
            "camnames": posepipeline.imgStream.camNames,
            "arucomodel": state['arucomodel'],
            "innerpipeline": {
                "arucodetector": arucodetectors[data['model']['arucodetection']]({
                    'arucodata': state['arucodata'],
                    'arucomodel': state['arucomodel']
                })
            }
        }
        #sets observation maker
        posepipeline.ObservationMaker = CamerasObservationMaker.CamerasObservationMaker(
            multicamData)
        #sets pose calculator
        posedata = {
            "N_objects": len(posepipeline.imgStream.camNames),
            "record": data["model"]["record"]
        }
        #sets observation treatment
        if data['model']['mode']['type'] == 'REGULAR':
            posepipeline.posescalculator = PosesCalculator.PosesCalculator(
                posedata)
        elif data['model']['mode']['type'] == 'OUTLIERREMOVE':
            print("YOOO")
            #static parameters
            posedata['observations'] = data['model']['mode']['observations']
            posedata['Rcutoff'] = data['model']['mode']['Rcutoff']
            posedata['Tcutoff'] = data['model']['mode']['Tcutoff']
            print(posedata)
            posepipeline.posescalculator = OutlierRemPoseCalculator.OulierRemovalPoseCalculator(
                posedata)
        else:
            print("This pose calculator is invalid")
    elif data['model']['type'] == 'SYNTH_CANGALHO':
        obsdata = data['model']
        obsdata['synthmodel'] = state['synthmodel']
        posepipeline.ObservationMaker = CangalhoSynthObsMaker.CangalhoSynthObsMaker(
            obsdata)
    elif data['model']['type'] == 'SYNTH_CAMERA':
        obsdata = data['model']
        obsdata['synthmodel'] = state['synthmodel']
        obsdata['modelscene'] = state['modelscene']
        visu.ViewRefs(obsdata['modelscene'][0], obsdata['modelscene'][1])
        posepipeline.ObservationMaker = CameraSynthObsMaker.CameraSynthObsMaker(
            obsdata)
    else:
        print("This Pipeline Model is invalid")
    #sets thread for terminal window
    CommandLine.Start(posepipeline, CommandsImporterPose.CommandsImporterPose)
    #sets thread for pipeline
    t1 = threading.Thread(target=worker, args=(posepipeline, ))
    t1.start()
    #spins ros if necessary
    if data['input']['type'] == 'ROS' or data['input']['type'] == 'ROS_GATHER':
        try:
            rospy.spin()
        except KeyboardInterrupt:
            print("shut")
    print("Exited Stuff")
    posepipeline.Stop()
    t1.join()
    print("FINISHED ELEGANTLY")
    #see and save resulting scene
    print(posepipeline.posescalculator.R)
    print(posepipeline.posescalculator.t)
    visu.ViewRefs(posepipeline.posescalculator.R,
                  posepipeline.posescalculator.t,
                  refSize=0.1,
                  showRef=True,
                  saveImg=True,
                  saveName=posepipeline.folder + "/screenshot.jpg")
    #record r and t
    if "record" in data["model"] and data["model"]["record"] == True:
        recordeddata = {
            "R": posepipeline.posescalculator.recordedRs,
            "T": posepipeline.posescalculator.recordedTs
        }
        print(len(recordeddata['R']))
        print(len(recordeddata['T']))
        FileIO.saveAsPickle("/recorded", recordeddata, posepipeline.folder,
                            False, False)
    datatosave = {
        "R": posepipeline.posescalculator.R,
        "t": posepipeline.posescalculator.t
    }
    if data['input']['type'] == 'ROS' or data['input']['type'] == 'ROS_GATHER':
        datatosave['camnames'] = posepipeline.imgStream.camNames
    FileIO.saveAsPickle("/poses", datatosave, posepipeline.folder, False,
                        False)
def getDependencies(self, module, key):
    """Look up *module*'s dependency list for *key* and return it.

    A warning is written to stderr when nothing was found and the
    build-pipeline debug level exceeds 1.
    """
    found = module.getDependencies(key)
    no_deps = (len(found) == 0)
    if no_deps and CommandLine.getBuildPipelineDebug(self) > 1:
        sys.stderr.write("Warning: no dependencies were found")
    return found
def main():
    '''Main function for the primer design program.
    Imports primerDesign, Bio, CommandLine, and sequenceAnalysis.
    Main function takes in inputs and boolean checks from the command line. These include the target sequence,
    restriction enzymes one and two, any changes to start and stop codons, and a verbosity check which will enable
    printing either to standard output or a output file.
    Output would appear as the following after a successful run of main:
    ############################################################
    FastA Header
    Forward Primer
    Primer Sequence
    {} nucleotides were added to give {} efficiency after {} hours.
    Buffer {} for digestion at {} degrees.
    Melting Temperature Forward
    GC Content Percentage Forward
    Reverse Primer
    Primer Sequence
    {} nucleotides were added to give {} efficiency after {} hours.
    Buffer {} for digestion at {} degrees.
    Melting Temperature Reverse
    GC Content Percentage Reverse
    ############################################################
    Should errors occur such as improper target sequence or (Fill in the check conditions we can think of here),
    a message will be displayed indicating the potential problem to the user:
    Error: This program has detected that (Situation). Please correct your (Situation) and try again.
    After the message is displayed, the program will exit and return back to the terminal line.
    '''
    ###################################################################################################
    #Main method variables
    cl = CommandLine.Command_Line()
    gcForward = None
    gcReverse = None
    #Gather the restriction enzymes
    restrictionEnzyme1 = cl.args.enzymeOne
    restrictionEnzyme2 = cl.args.enzymeTwo
    #Gather the start and stop codon
    startCodon = cl.args.start
    stopCodon = cl.args.stop
    #Verbosity Boolean
    verb = cl.args.verbosity
    #File Name
    targetFile = cl.args.target
    #Marker Number (width of the '#' separator lines)
    markerNumber = 60
    #Degree Symbol
    degree = "\u00b0"
    ###################################################################################################
    #Check to see if all of the required elements are in place. If any value is at none, terminate the program with a message
    if targetFile is None:
        print(
            "No Target Sequence inputted. Please retry with a proper input file in the appropriate location. See -h for command line help."
        )
        sys.exit()
    else:
        pass
    if restrictionEnzyme1 is None:
        print(
            "No Enzyme One inputted. Please retry with an Enzyme One in the appropriate location. See -h for command line help."
        )
        sys.exit()
    else:
        pass
    if restrictionEnzyme2 is None:
        print(
            "No Enzyme Two inputted. Please retry with an Enzyme Two in the appropriate location. See -h for command line help."
        )
        sys.exit()
    else:
        pass
    ###################################################################################################
    fastA = sequenceAnalysis.FastAreader(
        targetFile)  #Read from the file collected by CommandLine class
    for head, seq in fastA.readFasta(
    ):  #Ideally there should be only one fastA to read given a run.
        #If we decide otherwise, place all of the class method calls within the loop
        createdPrimer = primerDesign.primerDesign(head, seq,
                                                  restrictionEnzyme1,
                                                  restrictionEnzyme2,
                                                  startCodon, stopCodon)
    # NOTE(review): buildPrimers() is commented out below, yet attributes
    # like forwardPrimer/finalFwdPrimer are read afterwards — presumably the
    # primerDesign constructor computes them; confirm against primerDesign.
    #createdPrimer.buildPrimers() #Build the primers using the built in buildPrimers method. Results are stored in the class object
    nucForward = sequenceAnalysis.NucParams(str(createdPrimer.forwardPrimer))
    nucReverse = sequenceAnalysis.NucParams(str(createdPrimer.reversePrimer))
    # GC fraction = (G count + C count) / total nucleotide count.
    gcForward = (nucForward.nucComposit["G"] +
                 nucForward.nucComposit["C"]) / nucForward.nucCount()
    gcReverse = (nucReverse.nucComposit["G"] +
                 nucReverse.nucComposit["C"]) / nucReverse.nucCount()
    ###################################################################################################
    #Printing Section
    #Print either to an output file or std out depending upon verbosity condition
    if verb is True:
        #Verbosity mode output. If enabled, writes to a file instead of std out.
        with open("PrimerOut.txt", "w") as p:
            p.write("#" * markerNumber + "\n\n")
            p.write(createdPrimer.header + "\n\n")
            p.write("Forward Primer\n")
            p.write(createdPrimer.finalFwdPrimer + "\n")
            p.write("\n")
            # Enzyme dict layout used here: [0]=added nucleotides,
            # [1]=efficiency, [2]=hours, [3]=buffer, [4]=temperature.
            p.write(
                "'{0}' nucleotides were added to give {1} efficiency after {2} hours.\n"
                .format(
                    createdPrimer.restrictionEnzymeDict[createdPrimer.enzyme1]
                    [0], createdPrimer.restrictionEnzymeDict[
                        createdPrimer.enzyme1][1],
                    createdPrimer.restrictionEnzymeDict[
                        createdPrimer.enzyme1][2]))
            p.write("Buffer {} for digestion at {} Degrees.\n\n".format(
                createdPrimer.restrictionEnzymeDict[createdPrimer.enzyme1][3],
                createdPrimer.restrictionEnzymeDict[createdPrimer.enzyme1][4]))
            p.write("Melting Temperature = " + str(createdPrimer.tempOfFwd) +
                    degree + "C" + "\n")
            p.write("\n")
            p.write(str.format("{0:.4f}", gcForward) + " % GC Content\n")
            p.write("\n")
            p.write("Reverse Primer\n")
            p.write(createdPrimer.finalRevPrimer + "\n")
            p.write("\n")
            p.write(
                "'{0}' nucleotides were added to give {1} efficiency after {2} hours.\n"
                .format(
                    createdPrimer.restrictionEnzymeDict[createdPrimer.enzyme2]
                    [0], createdPrimer.restrictionEnzymeDict[
                        createdPrimer.enzyme2][1],
                    createdPrimer.restrictionEnzymeDict[
                        createdPrimer.enzyme2][2]))
            p.write("Buffer {} for digestion at {} Degrees.\n\n".format(
                createdPrimer.restrictionEnzymeDict[createdPrimer.enzyme2][3],
                createdPrimer.restrictionEnzymeDict[createdPrimer.enzyme2][4]))
            p.write("Melting Temperature = " + str(createdPrimer.tempOfRev) +
                    degree + "C" + "\n")
            p.write("\n")
            p.write(str.format("{0:.4f}", gcReverse) + " % GC Content\n")
            p.write("#" * markerNumber)
    else:
        #Print to std out instead of to a file
        # NOTE(review): unlike the file branch, the stdout branch omits the
        # degree symbol before "C" — possibly an intentional terminal
        # workaround; confirm before unifying.
        print("#" * markerNumber)
        print(createdPrimer.header)
        print()
        print("Forward Primer")
        print(createdPrimer.finalFwdPrimer)
        print()
        print(
            "'{0}' nucleotides were added to give {1} efficiency after {2} hours.\n"
            .format(
                createdPrimer.restrictionEnzymeDict[createdPrimer.enzyme1][0],
                createdPrimer.restrictionEnzymeDict[createdPrimer.enzyme1][1],
                createdPrimer.restrictionEnzymeDict[createdPrimer.enzyme1][2]))
        print("Buffer {} for digestion at {} Degrees.\n".format(
            createdPrimer.restrictionEnzymeDict[createdPrimer.enzyme1][3],
            createdPrimer.restrictionEnzymeDict[createdPrimer.enzyme1][4]))
        print("Melting Temperature = " + str(createdPrimer.tempOfFwd) + "C")
        print()
        print(str.format("{0:.4f}", gcForward) + " % GC Content\n")
        print()
        print("Reverse Primer")
        print(createdPrimer.finalRevPrimer)
        print()
        print(
            "'{0}' nucleotides were added to give {1} efficiency after {2} hours.\n"
            .format(
                createdPrimer.restrictionEnzymeDict[createdPrimer.enzyme2][0],
                createdPrimer.restrictionEnzymeDict[createdPrimer.enzyme2][1],
                createdPrimer.restrictionEnzymeDict[createdPrimer.enzyme2][2]))
        print("Buffer {} for digestion at {} Degrees.\n".format(
            createdPrimer.restrictionEnzymeDict[createdPrimer.enzyme2][3],
            createdPrimer.restrictionEnzymeDict[createdPrimer.enzyme2][4]))
        print()
        print("Melting Temperature = " + str(createdPrimer.tempOfRev) + "C")
        print()
        print(str.format("{0:.4f}", gcReverse) + " % GC Content\n")
        print()
        print("#" * markerNumber)
import CommandLine
import File
import UserDB

# Script entry point: wire the file manager and user database into the
# interactive command-line shell, then hand control to its main loop.
if __name__ == "__main__":
    shell = CommandLine.CLI(File.FileManager(), UserDB.UserManager())
    shell.main_loop()
def __init__(self, env, modulePickle, arguments, cmdLineTgts):
    """Capture the SCons build environment and project definitions.

    env          -- SCons Environment; its env["DEFS"] dict supplies every
                    build-tree path and file list read below.
    modulePickle -- not referenced in this method; presumably consumed
                    elsewhere (TODO confirm).
    arguments    -- parsed command-line arguments, stored for later use.
    cmdLineTgts  -- targets named on the scons command line, stored as-is.
    """
    # do a pattern match on the synth boundary paths, which we need to build
    # the module structure
    self.env = env
    # Keep a handle on the real scons Command before we shadow it below.
    CommandBaseline = env.Command

    # override certain scons build functions to de-sugar
    # build-pipeline's object types.
    def CommandOverride(tgts, srcs, cmds):
        # did we get a flat object?
        srcsSugared = srcs
        if not isinstance(srcs, list):
            srcsSugared = [srcs]
        modifiedSrcs = []
        # might want convert dependencies here.
        for src in srcsSugared:
            if isinstance(src, Source.Source):
                # Source objects are replaced by their string form.
                modifiedSrcs.append(str(src))
            else:
                modifiedSrcs.append(src)
        # Delegate to the original scons Command with de-sugared sources.
        return CommandBaseline(tgts, modifiedSrcs, cmds)

    self.env.Command = CommandOverride

    self.arguments = arguments
    self.cmdLineTgts = cmdLineTgts
    self.buildDirectory = env["DEFS"]["BUILD_DIR"]
    self.compileDirectory = env["DEFS"]["TMP_XILINX_DIR"]
    # NOTE(review): these three locals are not used in this method as
    # visible here — possibly leftovers; confirm before removing.
    givenVerilogs = Utils.clean_split(env["DEFS"]["GIVEN_VERILOGS"], sep=" ")
    givenNGCs = Utils.clean_split(env["DEFS"]["GIVEN_NGCS"], sep=" ")
    givenVHDs = Utils.clean_split(env["DEFS"]["GIVEN_VHDS"], sep=" ")
    self.apmName = env["DEFS"]["APM_NAME"]
    self.apmFile = env["DEFS"]["APM_FILE"]
    self.moduleList = []
    self.modules = {}  # Convenient dictionary
    self.awbParamsObj = AWBParams.AWBParams(self)
    # True when the only requested target is the dependency-init pass.
    self.isDependsBuild = CommandLine.getCommandLineTargets(self) == ["depends-init"]

    # We should be invoking this elsewhere?
    # self.wrapper_v = env.SConscript([env['DEFS']['ROOT_DIR_HW_MODEL'] + '/SConscript'])

    # this really doesn't belong here.
    # Choose the software executable path: a concrete .exe when C sources
    # were given, otherwise defer to scons's $TARGET placeholder.
    if env["DEFS"]["GIVEN_CS"] != "":
        SW_EXE_OR_TARGET = env["DEFS"]["ROOT_DIR_SW"] + "/obj/" + self.apmName + "_sw.exe"
        SW_EXE = [SW_EXE_OR_TARGET]
    else:
        SW_EXE_OR_TARGET = "$TARGET"
        SW_EXE = []
    self.swExeOrTarget = SW_EXE_OR_TARGET
    self.swExe = SW_EXE

    self.swIncDir = Utils.clean_split(env["DEFS"]["SW_INC_DIRS"], sep=" ")
    self.swLibs = Utils.clean_split(env["DEFS"]["SW_LIBS"], sep=" ")
    self.swLinkLibs = Utils.clean_split(env["DEFS"]["SW_LINK_LIBS"], sep=" ")
    self.m5BuildDir = env["DEFS"]["M5_BUILD_DIR"]

    # Build a " -bd <elf>" flag string for every given ELF, or empty.
    if len(env["DEFS"]["GIVEN_ELFS"]) != 0:
        elf = " -bd " + str.join(" -bd ", Utils.clean_split(env["DEFS"]["GIVEN_ELFS"], sep=" "))
    else:
        elf = ""
    self.elf = elf

    #
    # Use a cached post par ncd to guide map and par?  This is off by default since
    # the smart guide option can make place & route fail when it otherwise would have
    # worked.  It doesn't always improve run time, either.  To turn on smart guide
    # either define the environment variable USE_SMARTGUIDE or set
    # USE_SMARTGUIDE on the scons command line to a non-zero value.
    #
    self.smartguide_cache_dir = env["DEFS"]["WORKSPACE_ROOT"] + "/var/xilinx_ncd"
    self.smartguide_cache_file = self.apmName + "_par.ncd"
    # Create the cache dir; an already-existing dir is fine (EEXIST ignored).
    try:
        os.mkdir(self.smartguide_cache_dir)
    except OSError, e:
        if e.errno == errno.EEXIST:
            pass
def main(path, view=True):
    """Configure and run the pose-estimation pipeline described by a JSON file.

    path -- path of the JSON configuration file; its 'input' section selects
            the image stream and its 'model' section selects the observation
            maker and pose calculator.
    view -- when True, show/save visualizations at several points.

    Returns the log output folder when a full run produced poses, otherwise
    returns None implicitly.
    """
    #Reads the configuration file
    data = FileIO.getJsonFromFile(path)

    posepipeline = PosePipeline.PosePipeline()

    #holds stuff
    state = {}

    #hash of aruco detector classes
    arucodetectors = {
        'singular': SingleArucosDetector.SingleArucosDetector,
        'allforone': CangalhoPnPDetector.CangalhoPnPDetector,
        'depthforone': CangalhoProcrustesDetector.CangalhoProcrustesDetector
    }

    #In case we want to get the cameras we need the arucomodel
    if "CAMERA" in data['model']['type']:
        state['arucomodel'] = FileIO.getFromPickle(data['model']['arucomodel'])

    # Default 'record' to off when the config does not mention it.
    if "record" not in data["model"]:
        data["model"]['record'] = False

    #Assigns the InputStream
    if data['input']['type'] == 'IMG':
        #must set path where images are
        posepipeline.imgStream = ImgStreamReader.ImgStreamReader(
            data['input']['path'])
    elif data['input']['type'] == 'ROS':
        camNames = []
        #sets cameras if there are any
        if "cameras" in data['model']:
            camNames = data['model']['cameras']
        posepipeline.imgStream = RosStreamReader.RosStreamReader(
            camNames=camNames, inputData=data['input'])
        #setting stuff on state
        state['intrinsics'] = FileIO.getIntrinsics(
            posepipeline.imgStream.camNames)
        state['arucodata'] = FileIO.getJsonFromFile(data['model']['arucodata'])
    elif data['input']['type'] == 'ROS_GATHER':
        camNames = []
        #sets cameras if there are any
        if "cameras" in data['model']:
            camNames = data['model']['cameras']
        posepipeline.imgStream = RosGatherStreamReader.RosGatherStreamReader(
            camNames=camNames, inputData=data['input'])
        #setting stuff on state
        state['intrinsics'] = FileIO.getIntrinsics(
            posepipeline.imgStream.camNames)
        state['arucodata'] = FileIO.getJsonFromFile(data['model']['arucodata'])
        state['arucomodel'] = FileIO.getFromPickle(data['model']['arucomodel'])
    elif data['input']['type'] == 'SYNTH':
        # Synthetic stream: no real images, poses come from a pickled model.
        posepipeline.imgStream = StreamReader.StreamReader()
        state['synthmodel'] = FileIO.getFromPickle(data['model']['arucomodel'])
        if data['model']['type'] == "SYNTH_CAMERA" or data['model'][
                'type'] == "SYNTH_CAMERA2":
            state['modelscene'] = FileIO.getFromPickle(
                data['model']['modelscene'])
        #print(state['synthmodel'][0])
        #print(state['synthmodel'][1])
        #visu.ViewRefs(state['synthmodel'][0],state['synthmodel'][1],refSize=1,showRef=True,saveImg=True,saveName=posepipeline.folder+"/screenshot.jpg")
        # Pick the object count from the scene (camera models) or the
        # synthetic model itself (cangalho models).
        if "CAMERA" in data['model']['type']:
            posepipeline.posescalculator = PosesCalculatorSynth.PosesCalculatorSynth(
                {"N_objects": len(state['modelscene']['R'])})
        else:
            posepipeline.posescalculator = PosesCalculatorSynth.PosesCalculatorSynth(
                {"N_objects": len(state['synthmodel']['R'])})
    else:
        print("This Pipeline input is invalid")

    #Assigns observation maker and posecalculator
    if data['model']['type'] == 'CANGALHO':
        #static parameters
        # Intrinsics of the first (only) camera's RGB sensor.
        singlecamData = {
            "K":
            state['intrinsics'][
                posepipeline.imgStream.camNames[0]]['rgb']['K'],
            "D":
            state['intrinsics'][posepipeline.imgStream.camNames[0]]['rgb']
            ['D'],
            "arucodata": state['arucodata']
        }
        #sets observation maker
        posepipeline.ObservationMaker = CangalhoObservationsMaker.CangalhoObservationMaker(
            singlecamData)
        #sets pose calculator
        posedata = {
            "N_objects": len(state['arucodata']['ids']),
        }
        if 'record' in data["model"]:
            posedata["record"] = data["model"]["record"]
        else:
            posedata["record"] = False
        posepipeline.posescalculator = PosesCalculator.PosesCalculator(
            posedata)
    elif data['model']['type'] == 'CAMERA':
        state['arucomodel'] = FileIO.getFromPickle(data['model']['arucomodel'])
        #static parameters
        # The inner detector class is chosen by name from arucodetectors.
        multicamData = {
            "intrinsics": state['intrinsics'],
            "arucodata": state['arucodata'],
            "camnames": posepipeline.imgStream.camNames,
            "arucomodel": state['arucomodel'],
            "innerpipeline": {
                "arucodetector":
                arucodetectors[data['model']['arucodetection']]({
                    'arucodata': state['arucodata'],
                    'arucomodel': state['arucomodel']
                })
            }
        }
        #sets observation maker
        posepipeline.ObservationMaker = CamerasObservationMaker.CamerasObservationMaker(
            multicamData)
        #sets pose calculator
        posedata = {
            "N_objects": len(posepipeline.imgStream.camNames),
            "record": data["model"]["record"]
        }
        #sets observation treatment
        if data['model']['mode']['type'] == 'REGULAR':
            posepipeline.posescalculator = PosesCalculator.PosesCalculator(
                posedata)
        elif data['model']['mode']['type'] == 'OUTLIERREMOVE':
            print("YOOO")
            #static parameters
            posedata['observations'] = data['model']['mode']['observations']
            posedata['Rcutoff'] = data['model']['mode']['Rcutoff']
            posedata['Tcutoff'] = data['model']['mode']['Tcutoff']
            print(posedata)
            posepipeline.posescalculator = OutlierRemPoseCalculator.OulierRemovalPoseCalculator(
                posedata)
        else:
            print("This pose calculator is invalid")
    elif data['model']['type'] == 'SYNTH_CANGALHO':
        #in order to not copy by reference https://stackoverflow.com/questions/3975376/understanding-dict-copy-shallow-or-deep
        obsdata = copy.deepcopy(data['model'])
        obsdata['synthmodel'] = state['synthmodel']
        posepipeline.ObservationMaker = CangalhoSynthObsMaker.CangalhoSynthObsMaker(
            obsdata)
    elif data['model']['type'] == 'SYNTH_CAMERA':
        #in order to not copy by reference https://stackoverflow.com/questions/3975376/understanding-dict-copy-shallow-or-deep
        obsdata = copy.deepcopy(data['model'])
        obsdata['synthmodel'] = state['synthmodel']
        obsdata['modelscene'] = state['modelscene']
        if (view == True):
            visu.ViewRefs(obsdata['modelscene'][0], obsdata['modelscene'][1])
        posepipeline.ObservationMaker = CameraSynthObsMaker.CameraSynthObsMaker(
            obsdata)
    elif data['model']['type'] == 'SYNTH_CAMERA2':
        #in order to not copy by reference https://stackoverflow.com/questions/3975376/understanding-dict-copy-shallow-or-deep
        obsdata = copy.deepcopy(data['model'])
        obsdata['synthmodel'] = state['synthmodel']
        obsdata['modelscene'] = state['modelscene']
        if view:
            print("DAATAAAA")
            print(data['model'])
            #visu.ViewRefs(obsdata['modelscene']['R'],obsdata['modelscene']['t'])
        posepipeline.ObservationMaker = CameraSynthObsMaker2.CameraSynthObsMaker2(
            obsdata)
    elif data['model']['type'] == 'SYNTH_CANGALHO2':
        state['arucodata'] = FileIO.getJsonFromFile(data['model']['arucodata'])
        state['arucodata']['idmap'] = aruco.markerIdMapper(
            state['arucodata']['ids'])
        #in order to not copy by reference https://stackoverflow.com/questions/3975376/understanding-dict-copy-shallow-or-deep
        obsdata = copy.deepcopy(data['model'])
        obsdata['synthmodel'] = state['synthmodel']
        obsdata['arucodata'] = state['arucodata']
        if view:
            print("DAATAAAA")
            print(data['model'])
            #visu.ViewRefs(obsdata['modelscene']['R'],obsdata['modelscene']['t'])
        posepipeline.ObservationMaker = CangalhoSynthObsMaker2.CangalhoSynthObsMaker2(
            obsdata)
    else:
        print("This Pipeline Model is invalid")

    #sets thread for terminal window
    CommandLine.Start(posepipeline, CommandsImporterPose.CommandsImporterPose)

    #sets thread for pipeline
    t1 = threading.Thread(target=worker, args=(posepipeline, ))
    t1.start()

    #spins ros if necessary
    if data['input']['type'] == 'ROS' or data['input']['type'] == 'ROS_GATHER':
        try:
            rospy.spin()
        except KeyboardInterrupt:
            print("shut")

    print("Stop Threads")
    posepipeline.Stop()
    #CommandLine.Stop()
    t1.join()
    print("Finished :)")

    #Only create log if full process was done
    if posepipeline.posescalculator.t is not None:
        #Create the folder
        posepipeline.folder = FileIO.CreateFolder(
            "./Logs/", suffix=FileIO.GetAnimalName())

        #print("DATA IS")
        #print(data)

        #saves pipeline configuration on the outputfolder
        print(posepipeline.folder)
        FileIO.putFileWithJson(data, "pipeline", posepipeline.folder + "/")

        datatosave = {
            "R": posepipeline.posescalculator.R,
            "t": posepipeline.posescalculator.t
        }

        #compute the corners of the cangalho
        if data['model']['type'] == 'CANGALHO' or data['model'][
                'type'] == 'SYNTH_CANGALHO2':
            arucoModel = {
                "R": posepipeline.posescalculator.R,
                "t": posepipeline.posescalculator.t
            }
            corners = aruco.ComputeCorners(state['arucodata'], arucoModel)
            if (view == True):
                visu.SeePositions(corners)
            datatosave['corners'] = corners

        if (view):
            #see and save resulting scene
            print(posepipeline.posescalculator.R)
            print(posepipeline.posescalculator.t)
            visu.ViewRefs(posepipeline.posescalculator.R,
                          posepipeline.posescalculator.t,
                          refSize=0.1,
                          showRef=True,
                          saveImg=True,
                          saveName=posepipeline.folder + "/screenshot.jpg")

        #record r and t
        if data["model"]["record"] == True:
            recordeddata = {
                "R": posepipeline.posescalculator.recordedRs,
                "T": posepipeline.posescalculator.recordedTs
            }
            FileIO.saveAsPickle("/recorded", recordeddata,
                                posepipeline.folder, False, False)

        if data['input']['type'] == 'ROS' or data['input'][
                'type'] == 'ROS_GATHER':
            datatosave['camnames'] = posepipeline.imgStream.camNames

        FileIO.saveAsPickle("/poses", datatosave, posepipeline.folder, False,
                            False)

        return posepipeline.folder
def test_WrongInput(self):
    """An invalid (non-string) argument should yield -1."""
    self.assertEqual(CommandLine.CommandLine(12), -1)
def parseCommandLine(self):
    """Run the command-line parser and cache the resulting argument
    dictionary on this instance."""
    self.commandLineArguments = CommandLine.parse_command_line()
def cmd(self, command_line): """ command_line -- all of the keywords passed in the command string, parsed """ if len(command_line) < 2: raise CommandError("Incomplete command.") machine_name = check_machine_name(command_line) if len(command_line) == 2: system_state.push_prompt(["machine", machine_name]) return OK, [] command = command_line[2].lower() bg_flag = command_line.bg_flag if command == "ssh": command_string = "ssh %s" % machine_name command_line = CommandLine.process_input(command_string) return Ssh().cmd(command_line) cnm = system_state.cnm_connector try: if command == "push": return cnm.push_machine_config(machine_name, bg_flag) elif command == "unpush": return cnm.unpush_machine_config(machine_name, bg_flag) elif command in ["install", "uninstall", "verify", "configure", "execute", "purge", "fix", "backup", "restore"]: argument = '' if command == "restore": if len(command_line) != 5: msg = "Incomplete command; require a restore target" raise CommandError(msg) argument = command_line[4] if command == "execute": if len(command_line) != 5: msg = "Incomplete command; require a package name "\ "and a command." raise CommandError(msg) command = command_line[4] if len(command_line) <= 3: msg = "Incomplete command; require a package name." raise CommandError(msg) package_name = command_line[3] return cnm.package_command(command, machine_name, package_name, argument, bg_flag) elif command in [ "enable", "setup" ]: prompt = "%s administrative ssh password: "******"enable": return cnm.enable_command(machine_name, password, bg_flag) else: return cnm.setup_command(machine_name, password, bg_flag) elif command == [ "disable" ]: post_data = {"yaml": yaml.dump( {"machine_type": BDR_CLIENT_TYPE })} return cnm.machine_job(machine_name, "disable", bg_flag, post_data) elif command == "dist": if len(command_line) <= 3: msg = "Incomplete command; requires a dist file name." 
raise CommandError(msg) dist_name = command_line[-1] return cnm.dist_command(machine_name, dist_name, bg_flag) elif command in ['test', 'init', 'reconcile', 'check-status']: return cnm.machine_job(machine_name, command, bg_flag) else: raise CommandError("%s is not a valid command" % command) except MachineTraceback, m_err: libUi.process_traceback(m_err) return FAIL, []