def showDoc(self, f):
    """Useful for printing <methodname>.__doc__.

    Example: myplot.showDoc(myplot.showDoc.__doc__) prints this very
    docstring without \\t, \\n or indentation.
    """
    print(trim.trim(f))
def format_img(self):
    # Convert the image to JPEG if needed, then trim it.
    fmt = self.img_path.split(".")[-1].lower()
    img_name = ".".join(self.img_path.split(".")[:-1])
    if fmt != "jpg":
        img = Image.open(self.img_path)
        rgb_im = img.convert('RGB')
        rgb_im.save('{}.jpg'.format(img_name))
        os.remove(self.img_path)
    self.img_path = img_name + '.jpg'
    trim.trim(self.img_path)
def addChineseModel(col):
    mm = col.models
    m = mm.new(_("Chinese"))
    fm = mm.newField(_("Hanzi"))
    mm.addField(m, fm)
    fm = mm.newField(_("Meaning"))
    mm.addField(m, fm)
    fm = mm.newField(_("Ruby"))
    mm.addField(m, fm)
    fm = mm.newField(_("Tones"))
    mm.addField(m, fm)
    # Recognition card
    t = mm.newTemplate(_("Recognition"))
    t['qfmt'] = "<div class=chinese>{{Hanzi}}</div>"
    t['afmt'] = trim("""
{{FrontSide}}
<hr id=answer>
<div class=chinese>{{furigana:Ruby}}</div>
<div>{{Meaning}}</div>
""")
    mm.addTemplate(m, t)
    # Recall card
    t = mm.newTemplate(_("Recall"))
    t['qfmt'] = "<div>{{Meaning}}</div>"
    t['afmt'] = trim("""
{{FrontSide}}
<hr id=answer>
<div class=chinese>{{furigana:Ruby}}</div>
""")
    mm.addTemplate(m, t)
    # CSS
    # Get rid of Arial. Without setting any font, a system standard should be
    # used, which should be more to the taste of the user.
    m['css'] = re.sub(' font-family: arial;\n', '', m['css'])
    m['css'] += trim(u"""
.chinese { font-size: 30px }
.win .chinese { font-family: "MS Mincho", "MS 明朝"; }
.mac .chinese { font-family: "Hiragino Mincho Pro", "ヒラギノ明朝 Pro"; }
.linux .chinese { font-family: "Kochi Mincho", "東風明朝"; }
.mobile .chinese { font-family: "Hiragino Mincho ProN"; }
.tone1 {color: red;}
.tone2 {color: orange;}
.tone3 {color: green;}
.tone4 {color: blue;}
.tone5 {color: black;}
.tone6 {}
.tone7 {}
.tone8 {}
.tone9 {}
""")
    mm.add(m)
    return m
def crop_photos(self):
    """Crop 'raw' photos to approximately match the outline of the object."""
    workingDir = rawImagesTestDir if self.TestModeOn else rawImagesRealDir
    imageList = os.listdir(workingDir)
    if not imageList:
        raise SystemExit("Please populate " + workingDir +
                         " with images. It's currently empty.")
    print("[x] Initiating image cropping")
    for counter, imStr in enumerate(imageList, start=1):
        trim.trim(workingDir + imStr,
                  croppedImagesDir + "croppedIm" + str(counter) + ".jpg",
                  self.Parameters.mps, self.Parameters.ctl, counter)
def save_res(data, task):
    conn = connMysql()
    cur = conn.cursor()
    # Use a parameterized query instead of string concatenation to avoid
    # SQL injection and quoting bugs.
    sql = "UPDATE `gr`.`main` SET `res` = %s WHERE running_id = %s"
    cur.execute(sql, (trim(data), task[1]))
    conn.commit()
    cur.close()
    conn.close()
def appSubSet(S, t, e):
    """Approximate subset sum (CLRS APPROX-SUBSET-SUM): return the largest
    achievable subset sum that is at most t, within a factor (1 + e) of the
    optimum."""
    n = len(S)
    L = [[] for _ in range(n + 1)]
    L[0] = [0]
    for i in range(1, n + 1):
        # Extend every achievable sum by S[i-1], then merge with the old list.
        L[i] = mergeLists(L[i - 1].copy(), plus(L[i - 1].copy(), S[i - 1]))
        # Thin out values that lie within a factor (1 + e/(2n)) of each other.
        L[i] = tr.trim(L[i], e / (2 * n))
        # Discard sums that exceed the target t.
        removeEach(L[i], t)
    return max(L[n])
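# A minimal sketch of the helpers appSubSet relies on, assuming the CLRS
# APPROX-SUBSET-SUM conventions. mergeLists, plus, removeEach and tr.trim are
# not shown in this snippet (tr is the module holding trim there), so these
# are illustrative stand-ins, not the original implementations.
def plus(lst, x):
    # Shift every achievable sum by x.
    return [v + x for v in lst]

def mergeLists(a, b):
    # Merge two sorted lists of sums, dropping duplicates.
    return sorted(set(a) | set(b))

def removeEach(lst, t):
    # Drop, in place, every sum that exceeds the target t.
    lst[:] = [v for v in lst if v <= t]

def trim(sums, delta):
    # Keep a value only if it exceeds the last kept value by more than a
    # factor of (1 + delta); assumes `sums` is sorted ascending.
    kept = [sums[0]]
    for v in sums[1:]:
        if v > kept[-1] * (1 + delta):
            kept.append(v)
    return kept

# With these stand-ins, appSubSet([104, 102, 201, 101], 308, 0.40) returns
# 302, matching the worked example in CLRS section 35.5.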
def main():
    args = parseArgument()
    # Dispatch according to the chosen subcommand.
    if args.cmd == 'search':
        import search
        search.contains(args.input, args.output, args.term, args.regex)
    elif args.cmd == 'delete':
        import delete
        if args.duplicatedLines:
            delete.duplicatedLines(args.input, args.output, args.withoutBlankLines)
        else:
            delete.contains(args.input, args.output, args.term, args.regex)
    elif args.cmd == 'trim':
        from trim import trim
        trim(args.input, args.output, args.lines)
    print('')
    print('Done!')
def get_performance(o, c, t):
    chord_meters = c * radius
    prop = propeller.Propeller(t, chord_meters, radius, n_blades, r, y, dr, dy,
                               airfoils=airfoils, Cl_tables=Cl_tables,
                               Cd_tables=Cd_tables)
    quad = quadrotor.Quadrotor(prop, vehicle_weight)
    ff_kwargs = {'propeller': prop, 'pitch': pitch,
                 'n_azi_elements': n_azi_elements, 'allowable_Re': allowable_Re,
                 'Cl_funs': Cl_funs, 'Cd_funs': Cd_funs, 'tip_loss': tip_loss,
                 'mach_corr': mach_corr, 'alt': alt,
                 'lift_curve_info_dict': lift_curve_info_dict}
    trim0 = np.array([alpha0, o])
    alpha_trim, omega_trim, converged = trim.trim(quad, v_inf, trim0, ff_kwargs)
    T_ff, H_ff, P_ff = bemt.bemt_forward_flight(
        quad, pitch, omega_trim, alpha_trim, v_inf, n_azi_elements, alt=alt,
        tip_loss=tip_loss, mach_corr=mach_corr, allowable_Re=allowable_Re,
        Cl_funs=Cl_funs, Cd_funs=Cd_funs,
        lift_curve_info_dict=lift_curve_info_dict)
    dT_h, P_h = bemt.bemt_axial(prop, pitch, o, allowable_Re=allowable_Re,
                                Cl_funs=Cl_funs, Cd_funs=Cd_funs,
                                tip_loss=tip_loss, mach_corr=mach_corr, alt=alt)
    return sum(dT_h), P_h, T_ff, P_ff, alpha_trim, omega_trim
def main():
    start = datetime.datetime.now()
    args = parseArgument()
    # Dispatch according to the chosen subcommand.
    if args.cmd == 'search':
        import search
        search.contains(args.input, args.output, args.term, args.regex)
    elif args.cmd == 'delete':
        import delete
        if args.duplicatedLines:
            delete.duplicatedLines(args.input, args.output, args.withoutBlankLines)
        else:
            delete.contains(args.input, args.output, args.term, args.regex)
    elif args.cmd == 'trim':
        from trim import trim
        trim(args.input, args.output, args.lines)
    took = datetime.datetime.now() - start
    log.info('Done! (%dsec.)' % took.seconds)
def test_trim(self):
    self.assertEqual("""
abc
123

test
""", trim.trim("""
abc\t
123\t
\t\t
test
"""))
def input_data(image_file):
    images = np.zeros([1, IMAGE_HEIGHT * IMAGE_WIDTH], dtype='float32')
    image = Image.open(image_file)
    trimmed = trim(image)
    image.close()
    image_gray = trimmed.convert('L')
    image_resize = image_gray.resize(size=(IMAGE_WIDTH, IMAGE_HEIGHT))
    input_img = np.array(image_resize, dtype='float32')
    # Scale pixel values into [-0.5, 0.5].
    input_img = np.multiply(input_img.flatten(), 1. / 255) - 0.5
    images[0, :] = input_img
    return images
def download_images(self, urls, name):
    paths = []
    base_dir = "postsToBeUploaded/"
    for i, url in enumerate(urls):
        title = name + 'bg' if i == 0 else name
        fmt = url.split(".")[-1].lower()
        urllib.request.urlretrieve(url, '{}{}.{}'.format(base_dir, title, fmt))
        if fmt != "jpg":
            # Convert non-JPEG downloads to JPEG.
            img = Image.open('{}{}.{}'.format(base_dir, title, fmt))
            rgb_im = img.convert('RGB')
            rgb_im.save('{}{}.jpg'.format(base_dir, title))
            os.remove('{}{}.{}'.format(base_dir, title, fmt))
        path = '{}{}.jpg'.format(base_dir, title)
        paths.append(path)
        trim.trim(path)
    return tuple(paths)
def test_trim_removing_leading(self):
    self.assertEqual(
        """\
abc
123

test
""",
        trim.trim("""
abc\t
123\t
\t\t
test
""", leading=True))
def take_img(img):
    r_img = find.explore_img(img)
    t_img = trim.trim(img)
    r_img = cv.resize(r_img, dsize=(256, 256))
    t_img = cv.resize(t_img, dsize=(256, 256))
    y1 = deeplearnig.prediction(r_img)
    y2 = deeplearnig.prediction(t_img)
    # Return the label from whichever prediction is more confident.
    if np.max(y1) > np.max(y2):
        return f(y1.argmax())
    else:
        return f(y2.argmax())
def saveFileToDatabase(username, passwd, id):
    with open(username + '_1', 'r') as newfile:
        data = newfile.read()
    data = trim(data)
    conn = connMysql()
    cur = conn.cursor()
    # Parameterized query avoids SQL injection via the file contents.
    sql = "UPDATE signup SET score = %s WHERE id = %s"
    cur.execute(sql, (data, id))
    conn.commit()  # Persist the UPDATE (assuming autocommit is off)
    cur.close()
    conn.close()
def test_system(self):
    text = 'abc \n 1234 \n\n \n'
    import tempfile
    with tempfile.NamedTemporaryFile(delete=False, mode='w') as temporary_file:
        temporary_file.write(text)
    import subprocess
    process = subprocess.Popen(
        [sys.executable, os.path.join(ROOT_DIR, 'trim'), temporary_file.name],
        stderr=subprocess.PIPE)
    process.communicate()
    self.assertEqual(0, process.returncode)
    with open(temporary_file.name) as input_file:
        self.assertEqual(trim.trim(text), input_file.read())
    os.remove(temporary_file.name)
def leaderboardCyclopeptideSequencing(spectrum, n):
    MASSES = w4lib.AA_MASS
    base_amino_acids = []
    # Get all the single amino acids from the spectrum
    for mass in spectrum:
        for k, v in MASSES.items():
            if int(mass) == v:
                base_amino_acids.append(k)
    leaderboard = list(set(base_amino_acids))
    dummy_leaderboard = deepcopy(leaderboard)
    leaderpeptide = ""
    while leaderboard:
        # Expand: extend each candidate by every amino acid.
        leaderboard = ["".join(_) for _ in
                       itertools.product(leaderboard, w4lib.AA_MASS.keys())]
        dummy_leaderboard = deepcopy(leaderboard)
        for each_peptide in leaderboard:
            if get_mass(each_peptide) == spectrum[-1]:
                if cyclopeptideScoring(leaderpeptide, spectrum) < \
                        cyclopeptideScoring(each_peptide, spectrum):
                    leaderpeptide = each_peptide
            if spectrum[-1] < get_mass(each_peptide):
                dummy_leaderboard.remove(each_peptide)
        leaderboard = trim(dummy_leaderboard, spectrum, n)
        dummy_leaderboard = deepcopy(leaderboard)
    print(leaderpeptide)
    return "-".join(convert_to_masses(leaderpeptide))
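# A minimal sketch of the trim step assumed above (the actual trim is not
# shown in this snippet): keep the n highest-scoring peptides against the
# spectrum, retaining ties for n-th place, per the usual Leaderboard-Trim
# convention. It reuses cyclopeptideScoring from the snippet above; the
# textbook version scores against the linear spectrum instead.
def trim(leaderboard, spectrum, n):
    # Keep everything if the board is already small enough.
    if len(leaderboard) <= n:
        return leaderboard
    # Rank peptides by score against the spectrum, highest first.
    ranked = sorted(leaderboard,
                    key=lambda pep: cyclopeptideScoring(pep, spectrum),
                    reverse=True)
    # Keep the top n, plus any peptide tied with the n-th score.
    cutoff = cyclopeptideScoring(ranked[n - 1], spectrum)
    return [pep for pep in ranked
            if cyclopeptideScoring(pep, spectrum) >= cutoff]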
def prepare_data(name, noiseless):
    '''Prepare the HSIM datacubes for analysis.

    Arguments:
    ---------------
    name: String. The name of the HARMONI input/output cubes. For example, if
        name='/dir/name' then the following files must exist:
            /dir/name.fits
            /dir/name_Reduced_cube.fits
            /dir/name_Transmission_cube.fits
            /dir/name_Noiseless_Object_cube.fits
    noiseless: 'True' or 'False' (string). If 'True' then use the noiseless cube.

    Returns:
    ----------------
    data: A list. Each element corresponds to a single HARMONI spaxel and is a
        dictionary with the following keywords:
            'rad' - the radius of the spaxel
            'spec' - a 1D array. The spectrum of the spaxel.
            'spec_err' - a 1D array. The measurement error in the spectrum
                (1 standard deviation).
            'I_mod' - the intensity predicted by the PSF-convolved theoretical
                model for the spaxel.
            'S_mod' - the second moment predicted by the PSF-convolved
                theoretical model for the spaxel.
    wavel: An array corresponding to the wavelength axis of the HSIM output cubes.
    in_wavel: An array corresponding to the wavelength axis of the HSIM input cube.
    '''
    # Read in transmission cube.
    hdulist = fits.open(u'{0}_Transmission_cube.fits'.format(name))
    trans_cube = hdulist[0].data

    # Read in data from the reduced cube or the noiseless object cube.
    if noiseless == 'True':
        hdulist = fits.open(u'{0}_Noiseless_Object_cube.fits'.format(name))
    else:
        hdulist = fits.open(u'{0}_Reduced_cube.fits'.format(name))
    header = hdulist[0].header
    data_cube = hdulist[0].data / trans_cube

    # Read in measurement error from the second extension of the reduced cube.
    hdulist = fits.open(u'{0}_Reduced_cube.fits'.format(name))
    var_cube = hdulist[1].data
    err_cube = np.sqrt(var_cube)  # Convert variance to standard deviation
    err_cube = err_cube / trans_cube
    print('Data cubes read')

    # Get header values that define the wavelength axis of the HSIM output
    # cubes and create the wavelength array.
    sample = header[u'CDELT3']
    CRVAL3 = header[u'CRVAL3']
    NAXIS3 = header[u'NAXIS3']
    wavel = np.zeros(NAXIS3)
    for i in range(0, NAXIS3):
        wavel[i] = (CRVAL3 + (i * sample)) * 1e4

    # Get header values that define the spatial scale.
    NAXIS1 = header[u'NAXIS1']
    NAXIS2 = header[u'NAXIS2']
    CDELT1 = header[u'CDELT1']
    CDELT2 = header[u'CDELT2']

    # Check if the distance keyword exists in the header (in Mpc). Needed to
    # recover the correct intensity. If not, just make it 1.
    if 'D' in header:
        d = header[u'D']
    else:
        d = 1e6

    # Convert the data and errors from units of electrons to solar luminosities.
    electron2ph = sc.h * sc.c / (wavel * 1e-10)
    electron2lum = ph2lum(electron2ph, d, sample, CDELT1, CDELT2)
    data_cube = np.transpose(data_cube)
    data_cube *= electron2lum
    data_cube = np.transpose(data_cube)
    err_cube = np.transpose(err_cube)
    err_cube *= electron2lum
    err_cube = np.transpose(err_cube)

    # Convert the 3D cubes into 2D arrays, with each entry containing the
    # spectral data for a single spaxel.
    data_square = np.zeros((NAXIS2, NAXIS1), dtype=object)
    err_square = np.empty((NAXIS2, NAXIS1), dtype=object)
    for j in range(0, NAXIS2):
        for i in range(0, NAXIS1):
            data_square[j, i] = data_cube[:, j, i]
            err_square[j, i] = err_cube[:, j, i]

    # Trim data down to the size of HARMONI's FOV. Be careful here as the X
    # and Y axes are inverted.
    a = 213  # y-axis HARMONI FOV
    b = 151  # x-axis HARMONI FOV
    data_square = trim(data_square, a, b)
    err_square = trim(err_square, a, b)
    NAXIS2, NAXIS1 = np.shape(data_square)
    print('Trimmed data to HARMONI FOV')

    # Convert from 2D arrays to lists with the corresponding radius list R_list.
    RGRID = dist_arr(NAXIS2, NAXIS1, CDELT1)
    squares = RGRID, data_square, err_square
    lists = [x.flatten() for x in squares]
    p = lists[0].argsort()
    R_list, data_list, err_list = [x[p] for x in lists]

    # Load in the theoretical model that matches the input cube (if it exists).
    if os.path.isfile("{0}_model.p".format(name)):
        model = pickle.load(open("{0}_model.p".format(name), "rb"))
        I_mod_list = model['I_mod']
        S_mod_list = model['S_mod']
    else:
        I_mod_list = np.zeros(len(R_list))
        S_mod_list = np.zeros(len(R_list))

    # Finally construct a list of dictionaries. Each element of the list
    # corresponds to one spaxel.
    data = []
    for i in range(len(R_list)):
        mdict = {}
        mdict['rad'] = R_list[i]
        mdict['spec'] = data_list[i]
        mdict['spec_err'] = err_list[i]
        mdict['I_mod'] = I_mod_list[i]
        mdict['S_mod'] = S_mod_list[i]
        data.append(mdict)

    # Create a wavelength array corresponding to the input datacube.
    inheader = fits.getheader('{0}.fits'.format(name), 0)
    sample = inheader[u'CDELT3']
    CRVAL3 = inheader[u'CRVAL3']
    NAXIS3 = inheader[u'NAXIS3']
    in_wavel = np.zeros(NAXIS3)
    for i in range(0, NAXIS3):
        in_wavel[i] = CRVAL3 + (i * sample)

    return data, wavel, in_wavel
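# A hypothetical invocation of prepare_data (the path is illustrative), per
# the docstring above; the four FITS files for '/dir/name' must exist:
#
#     data, wavel, in_wavel = prepare_data('/dir/name', noiseless='False')
#     print(data[0]['rad'], data[0]['spec'].shape)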
def showDoc(self):
    """Print pplot's docstring."""
    print(trim.trim(doc))
def objfun(xn, **kwargs):
    radius = kwargs['radius']
    r = kwargs['r']
    y = kwargs['y']
    dr = kwargs['dr']
    dy = kwargs['dy']
    n_blades = kwargs['n_blades']
    airfoils = kwargs['airfoils']
    pitch = kwargs['pitch']
    vehicle_weight = kwargs['vehicle_weight']
    max_chord = kwargs['max_chord']
    max_chord_tip = kwargs['max_chord_tip']
    tip_loss = kwargs['tip_loss']
    mach_corr = kwargs['mach_corr']
    Cl_tables = kwargs['Cl_tables']
    Cd_tables = kwargs['Cd_tables']
    Cl_funs = kwargs['Cl_funs']
    Cd_funs = kwargs['Cd_funs']
    lift_curve_info_dict = kwargs['lift_curve_info_dict']
    allowable_Re = kwargs['allowable_Re']
    alt = kwargs['alt']
    v_inf = kwargs['v_inf']
    alpha0 = kwargs['alpha0']
    n_azi_elements = kwargs['n_azi_elements']
    mission_time = kwargs['mission_time']

    omega_h = xn[0]
    twist0 = xn[1]
    chord0 = xn[2] * radius  # Convert from c/R to meters before use in calculations
    dtwist = np.array(xn[3:len(r) + 2])
    dchord = np.array([x * radius for x in xn[len(r) + 2:2 * len(r) + 2]])
    twist = calc_twist_dist(twist0, dtwist)
    chord = calc_chord_dist(chord0, dchord)

    f = 10000000.
    fail = 0
    g = [1.0] * (2 * len(r) + 2)

    # Calculate geometric constraint values. If a genetic algorithm is used we
    # can fail the case immediately if there are any violations. If a
    # gradient-based algorithm is used this would cause the gradient
    # calculation to fail, so the constraints must be checked normally by the
    # optimizer.
    g[1] = chord[-1] / radius - max_chord_tip
    g[2:] = get_geocons(chord, max_chord, radius)

    prop = propeller.Propeller(twist, chord, radius, n_blades, r, y, dr, dy,
                               airfoils=airfoils, Cl_tables=Cl_tables,
                               Cd_tables=Cd_tables)
    quad = quadrotor.Quadrotor(prop, vehicle_weight)

    try:
        dT_h, P_h = bemt.bemt_axial(prop, pitch, omega_h,
                                    allowable_Re=allowable_Re, Cl_funs=Cl_funs,
                                    Cd_funs=Cd_funs, tip_loss=tip_loss,
                                    mach_corr=mach_corr, alt=alt)
    except FloatingPointError:
        print("Floating point error in axial BEMT")
        fail = 1
        return f, g, fail
    except IndexError:
        print("Index error in axial BEMT")
        fail = 1
        return f, g, fail

    try:
        # Use alpha0 (supplied by the user) and the hover omega as initial
        # guesses for trim.
        trim0 = [alpha0, omega_h]
        ff_kwargs = {'propeller': prop, 'pitch': pitch,
                     'n_azi_elements': n_azi_elements,
                     'allowable_Re': allowable_Re, 'Cl_funs': Cl_funs,
                     'Cd_funs': Cd_funs, 'tip_loss': tip_loss,
                     'mach_corr': mach_corr, 'alt': alt,
                     'lift_curve_info_dict': lift_curve_info_dict}
        alpha_trim, omega_trim, converged = trim.trim(quad, v_inf, trim0,
                                                      ff_kwargs)
        if not converged or not 0 < alpha_trim < np.pi / 2 or omega_trim < 0:
            fail = 1
            return f, g, fail
        T_trim, H_trim, P_trim = bemt.bemt_forward_flight(
            quad, pitch, omega_trim, alpha_trim, v_inf, n_azi_elements, alt=alt,
            tip_loss=tip_loss, mach_corr=mach_corr, allowable_Re=allowable_Re,
            Cl_funs=Cl_funs, Cd_funs=Cd_funs,
            lift_curve_info_dict=lift_curve_info_dict)
    except Exception as e:
        print("{} in ff trim".format(type(e).__name__))
        fail = 1
        return f, g, fail

    # Find the total mission energy.
    # mission_time = [time_in_hover, time_in_ff] in seconds.
    energy = P_h * mission_time[0] + P_trim * mission_time[1]
    f = energy
    print("total energy = " + str(f))
    print("Thrust hover = %s" % str(sum(dT_h)))
    print("(alpha, omega) = (%f, %f)" % (alpha_trim, omega_trim))

    # Evaluate performance constraints.
    g[0] = vehicle_weight / 4 - sum(dT_h)
    if g[0] > 0:
        print("hover thrust too low")
    return f, g, fail
def saveDoc(self):
    with open("documentation.txt", "w") as out:
        out.write(trim.trim(doc))
def test_trim_with_empty_string(self):
    self.assertEqual('\n', trim.trim(''))
    self.assertEqual('\n', trim.trim('\n'))
def test_trim_should_leave_leading_whitespace(self):
    self.assertEqual(' abc\n', trim.trim(' abc\n'))
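# A minimal sketch of a trim function consistent with the tests above (an
# assumption; the real module may differ): strip trailing whitespace from
# every line, keep leading whitespace, guarantee a single trailing newline,
# and with leading=True also drop leading blank lines.
def trim(text, leading=False):
    lines = [line.rstrip() for line in text.split('\n')]
    if leading:
        # Drop blank lines at the top of the text.
        while lines and not lines[0]:
            lines.pop(0)
    # Always end with exactly one newline.
    return '\n'.join(lines).rstrip('\n') + '\n'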
    # Default arguments
    parser.add_argument('-i', '--input', help='file to process.')
    parser.add_argument('-o', '--output',
                        help='file to save the result to (default: stdout).')  # smart_open.py

    # Parse!
    args = parser.parse_args()
    if not args.input:
        parser.error("No input file provided. Use '-i' or '--input'.")
    return args


if __name__ == "__main__":
    args = parseArgument()
    # Dispatch according to the chosen subcommand.
    if args.cmd == 'search':
        import search
        search.contains(args.input, args.output, args.term, args.regex)
    elif args.cmd == 'delete':
        import delete
        if args.duplicatedLines:
            delete.duplicatedLines(args.input, args.output, args.withoutBlankLines)
        else:
            delete.contains(args.input, args.output, args.term, args.regex)
    elif args.cmd == 'trim':
        from trim import trim
        trim(args.input, args.output, args.lines)
    print('')
    print('Done!')
Cdata = np.loadtxt(cfile, delimiter=',')

# This calls a function to check for low-level data input errors
#inputchecker(Sdata, Bdata, Pdata, Rdata, Cdata)

# These determine the size of the initial blank for the yoyo half
Blank_Radius = Sdata[1] / 2
Blank_Length = Sdata[2] / 2

# This computes the initial volume of the blank in question
Blank_Volume = ((Blank_Radius**2) * math.pi) * Blank_Length
RunningVolume = Blank_Volume

# This performs the subtractive operation to cut the desired bearing
RunningVolume = RunningVolume - BearingSubtract(Bdata, Sdata)

# Calls the 'trim' function to 'cut away' the rest of the blank and leave a
# mass-accurate yoyo curve shape
megamatx, megamaty, prof, rim, cup, bulkmat, halfmass = trim(
    RunningVolume, Sdata, Bdata, Pdata, Rdata, Cdata)

# Determination of a properly formatted 3D point cloud for STL generation
ubermat = np.column_stack((bulkmat, np.zeros(len(bulkmat))))
angsteps = 30
cloudify(ubermat, angsteps)

# Generates a PNG of the final yoyo geometry and displays the control points
#pngmaker(Bdata, Pdata, Rdata, Cdata, megamatx, megamaty)

# Generates a DXF file of the final yoyo geometry
dxfmaker(Bdata, prof, rim, cup)
             'allowable_Re': allowable_Re, 'Cl_funs': Cl_funs,
             'Cd_funs': Cd_funs, 'tip_loss': tip_loss, 'mach_corr': mach_corr,
             'alt': alt, 'lift_curve_info_dict': lift_curve_info_dict}
# dT_h, P_h = bemt.bemt_axial(prop, pitch, omega, allowable_Re=allowable_Re,
#                             Cl_funs=Cl_funs, Cd_funs=Cd_funs, tip_loss=True,
#                             mach_corr=mach_corr, alt=alt)
trim0 = [alpha0, omega]
for i in range(1000):
    alpha_trim, omega_trim, converged = trim.trim(quad, v_inf, trim0, ff_kwargs)
    T_ff, H_ff, P_ff = bemt.bemt_forward_flight(
        quad, pitch, omega_trim, alpha_trim, v_inf, n_azi_elements, alt=alt,
        tip_loss=tip_loss, mach_corr=mach_corr, allowable_Re=allowable_Re,
        Cl_funs=Cl_funs, Cd_funs=Cd_funs,
        lift_curve_info_dict=lift_curve_info_dict)
    print("FFnew (T, H, power) = (%f, %f, %f)" % (T_ff, H_ff, P_ff))
from trim import trim

if trim('hello ') != 'hello':
    print('Test failed!')
elif trim(' hello') != 'hello':
    print('Test failed!')
elif trim(' hello ') != 'hello':
    print('Test failed!')
elif trim(' hello world ') != 'hello world':
    print('Test failed!')
elif trim(' ') != '':
    print('Test failed!')
else:
    print('Test passed!')
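# A minimal sketch of a trim(s) that satisfies the checks above without
# using str.strip() (an assumption about the exercise's intent: implement
# trim by hand with slicing):
def trim(s):
    # Drop leading spaces one at a time.
    while s[:1] == ' ':
        s = s[1:]
    # Drop trailing spaces one at a time.
    while s[-1:] == ' ':
        s = s[:-1]
    return s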
def explore_img(cls, img):
    img_color = cv.imread(img, cv.IMREAD_COLOR)
    height, width = img_color.shape[:2]
    img_gray = cv.cvtColor(img_color, cv.COLOR_BGR2GRAY)

    # Separate the object from the background by binarization, keeping only
    # the object in the image.
    ret, img_binary = cv.threshold(img_gray, 0, 255,
                                   cv.THRESH_BINARY | cv.THRESH_OTSU)

    # Remove noise.
    kernel = cv.getStructuringElement(cv.MORPH_RECT, (3, 3))
    img_binary = cv.morphologyEx(img_binary, cv.MORPH_OPEN, kernel)

    # Detect contours (connected regions of equal pixel value) with
    # findContours. cv.RETR_EXTERNAL: outermost contours only.
    # cv.CHAIN_APPROX_SIMPLE: return only the points needed to draw the line.
    contours, hierarchy = cv.findContours(img_binary, cv.RETR_EXTERNAL,
                                          cv.CHAIN_APPROX_SIMPLE)

    img_card = None
    for contour in contours:            # For each contour...
        area = cv.contourArea(contour)  # ...compute its area (in pixels).
        if area < 10000:                # Skip areas of 10000 or less; the
            continue                    # threshold still needs discussion.
        epsilon = 0.02 * cv.arcLength(contour, True)
        approx = cv.approxPolyDP(contour, epsilon, True)
        size = len(approx)
        img_result = img_color.copy()
        cv.drawContours(img_result, [approx], -1, (0, 255, 0), 2)
        cv.waitKey(0)
        if cv.isContourConvex(approx):
            # Make the outline convex with convexHull.
            hull = cv.convexHull(approx)
            points = []
            for p in hull:  # Collect the hull points.
                points.append(p[0])
            if size >= 4:   # Four or more edges means a quadrilateral.
                # Warp to a frontal view.
                img_card = find.transform(img_color, points)

    if img_card is None:
        contours_xy = np.array(contours)
        # Find the min and max of x.
        value = list()
        for i in range(len(contours_xy)):
            for j in range(len(contours_xy[i])):
                value.append(contours_xy[i][j][0][0])  # x value at index 0
        x_min = min(value)
        x_max = max(value)
        # Find the min and max of y.
        value = list()
        for i in range(len(contours_xy)):
            for j in range(len(contours_xy[i])):
                value.append(contours_xy[i][j][0][1])  # y value at index 1
        y_min = min(value)
        y_max = max(value)
        points = np.zeros((4, 2), dtype="float32")
        points[0] = (x_min, y_min)
        points[1] = (x_max, y_min)
        points[2] = (x_max, y_max)
        points[3] = (x_min, y_max)
        img_card = find.transform(img_color, points)

    trim_card, name = trim.trim(img)
    return img_card
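# A hedged sketch of what find.transform is assumed to do above: a standard
# four-point perspective warp to a frontal view. The real helper is not shown
# in these snippets, so the name and behavior here are assumptions.
import numpy as np
import cv2 as cv

def transform(img, points):
    pts = np.array(points, dtype="float32")
    # Order corners: top-left has the smallest x+y, bottom-right the largest;
    # top-right has the smallest y-x, bottom-left the largest.
    s = pts.sum(axis=1)
    d = np.diff(pts, axis=1).ravel()
    ordered = np.array([pts[np.argmin(s)], pts[np.argmin(d)],
                        pts[np.argmax(s)], pts[np.argmax(d)]], dtype="float32")
    tl, tr, br, bl = ordered
    # Output size: the longest opposing edge in each direction.
    w = int(max(np.linalg.norm(br - bl), np.linalg.norm(tr - tl)))
    h = int(max(np.linalg.norm(tr - br), np.linalg.norm(tl - bl)))
    dst = np.array([[0, 0], [w - 1, 0], [w - 1, h - 1], [0, h - 1]],
                   dtype="float32")
    M = cv.getPerspectiveTransform(ordered, dst)
    return cv.warpPerspective(img, M, (w, h))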
def main():
    ############# Global variables initialisation ##################
    TIMER = Timer()
    EXEC_DIR = sys.path[0]  # Current running script path
    SUPPORTED_IDS = ['UniProtKB-AC', 'RefSeq', 'UniProtKB-ID', 'GeneID', 'GI',
                     'GO', 'UniRef100', 'UniRef90', 'UniRef50', 'UniParc',
                     'UniGene', 'EMBL', 'EMBL-CDS', 'Ensembl', 'Ensembl_TRS',
                     'Ensembl_PRO']

    ##################### Get parser ###############################
    parser = get_parser()

    ######## Print parser help if arguments missed #################
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    ########### Manage workflow according to args ##################
    Arguments = parser.parse_args()

    ################ Workflow starts here ##########################
    # ************************
    # ****** ID MAPPING ******
    # ************************
    subprocess.check_call('mkdir -p ' + Arguments.output + '/tmp', shell=True)
    TMP_DIR = Arguments.output + '/tmp'

    # Create the idmapping subset file if it doesn't already exist.
    if not exists(Arguments.mappingFile + '_subset.gz'):
        map.mk_susbet(Arguments.mappingFile)

    # Only map ids if asked.
    if Arguments.mapOnly:
        if Arguments.toDB:
            if Arguments.toDB in SUPPORTED_IDS:
                # Map sample ids
                map.any_ids_to_any_ids(Arguments.mappingFile, Arguments.ech,
                                       TMP_DIR + '/../ech_mapped_ids.txt',
                                       Arguments.toDB)
                # Map universe ids
                map.any_ids_to_any_ids(Arguments.mappingFile, Arguments.univ,
                                       TMP_DIR + '/../univ_mapped_ids.txt',
                                       Arguments.toDB)
                # Remove tmp files
                if not Arguments.keepTmp:
                    subprocess.check_call('rm -r ' + TMP_DIR, shell=True)
                sys.exit(0)
            else:
                print('toDB - bad argument: ' + Arguments.toDB +
                      '\nPlease use only supported ids. Program will stop now.')
                parser.print_help()
                sys.exit(1)
        else:
            parser.print_help()
            sys.exit(1)

    if Arguments.mapOffline:
        # Map id files OFFLINE, enabling support for all ids.
        if Arguments.fromOtherDB:
            map.any_ids_to_go(Arguments.mappingFile, Arguments.ech,
                              TMP_DIR + '/go_ech_raw.txt', 'GO')   # Map sample ids
            map.any_ids_to_go(Arguments.mappingFile, Arguments.univ,
                              TMP_DIR + '/go_univ_raw.txt', 'GO')  # Map universe ids
        # Map id files OFFLINE, enabling only RefSeq and GO id support.
        # Faster and most reliable solution.
        else:
            map.ids_to_go(Arguments.mappingFile, Arguments.ech,
                          TMP_DIR + '/go_ech_raw.txt')   # Map sample ids
            map.ids_to_go(Arguments.mappingFile, Arguments.univ,
                          TMP_DIR + '/go_univ_raw.txt')  # Map universe ids
    else:
        # Map id files ONLINE, enabling support for all ids. Results may be
        # incomplete.
        if Arguments.fromOtherDB:
            map.any_ids_to_go_online(Arguments.mappingFile, Arguments.ech,
                                     TMP_DIR + '/go_ech_raw.txt', 'GO')   # Map sample ids
            map.any_ids_to_go_online(Arguments.mappingFile, Arguments.univ,
                                     TMP_DIR + '/go_univ_raw.txt', 'GO')  # Map universe ids
        # Map id files ONLINE, enabling only RefSeq and UniProt id support.
        # Best solution for strong results.
        else:
            map.ids_to_go_online(Arguments.mappingFile, Arguments.ech,
                                 TMP_DIR + '/go_ech_raw.txt')   # Map sample ids
            map.ids_to_go_online(Arguments.mappingFile, Arguments.univ,
                                 TMP_DIR + '/go_univ_raw.txt')  # Map universe ids

    # ************************
    # **** GO ENRICHMENT *****
    # ************************
    # Gene set enrichment and hypergeometric tests using R scripts called by
    # the python map module.
    launchGSEA(TMP_DIR)

    # Trim prokaryotic GO terms if asked by the user.
    if Arguments.trim:
        if Arguments.obo:
            # Automatically check whether a subset file exists, and generate
            # it if that is not the case.
            trim.mk_subset(Arguments.obo, TMP_DIR + '/gosubset.txt')
            # Trim non-prokaryote and obsolete terms from enrichment results.
            trim.trim(TMP_DIR + '/gosubset.txt', TMP_DIR + '/../hyperesults.csv')
            # Generate the GO distribution plot.
            if Arguments.view:
                subprocess.check_call('R --vanilla --slave --args ' + TMP_DIR +
                                      '/../hyperesults.csv_cleaned.csv < ' +
                                      EXEC_DIR + '/goView.R', shell=True)
        else:
            print("Please provide an obo file!")
            parser.print_help()
            exit(1)

    # Generate the GO distribution plot.
    if Arguments.view:
        subprocess.check_call('R --vanilla --slave --args ' + TMP_DIR +
                              '/../hyperesults.csv < ' + EXEC_DIR + '/goView.R',
                              shell=True)

    # Remove tmp files
    if not Arguments.keepTmp:
        subprocess.check_call('rm -r ' + TMP_DIR, shell=True)

    ################## Show time elapsed ##########################
    TIMER.lifetime = "Workflow finished in"
    print(TIMER.lifetime)
cfg['exec']['outdir'] = os.path.join(outDir, cfg['exec']['outdir'])
try:
    os.mkdir(cfg['exec']['outdir'])
except FileExistsError:
    # Fall back to a timestamped directory name if the default already exists.
    cfg['exec']['outdir'] = cfg['exec']['outdir'] + '_' + str(int(time.time()))
    os.mkdir(cfg['exec']['outdir'])

# Copy the reference to outdir
copyfile(cfg['exec']['referenceSequence'],
         cfg['exec']['outdir'] + '/' + cfg['exec']['referenceSequence'])

# Parse and store read information from the input directory
readData = fp.RunFiles(inDir)

# Trim the reads
trim(readData, cfg, numThreads)

# Set up initial mapping jobs
mapping_list = []
for id in readData.runtime['trimmed']:
    mapping_list.append((id, readData.runtime['trimmed'][id][0],
                         readData.runtime['trimmed'][id][1],
                         os.path.abspath(cfg['exec']['referenceSequence'])))
sc.checkexists(os.path.join(cfg['exec']['outdir'], 'inital_mapping'))

# Index the reference sequence
indexing(cfg, os.path.abspath(cfg['exec']['referenceSequence']))

# Run initial mapping jobs
bam_list = mapping(cfg, mapping_list, cfg['exec']['outdir'] + '/inital_mapping',
                   numThreads)