def conv(self):  # convert()
    """Best-effort conversion of ``self.c``; conversion errors are ignored.

    The swallow is deliberate (caller treats conversion as optional), but the
    original bare ``except:`` also trapped KeyboardInterrupt/SystemExit —
    narrowed to ``Exception`` so interrupts still propagate.
    """
    try:
        convert(self.c)
    except Exception:
        pass
def part_creation_genbank(event):
    """GUI callback: let the user pick a GenBank file and convert it.

    Refreshes the part-creation panel, opens the file chooser, then either
    reports failure (nothing selected) or hands the file to the converter.
    """
    GUI.refresh_gui_part_creation()
    GUI.select_genbank_file()
    if not GUI.genbank_file:
        # select_genbank_file() left no file behind -> user cancelled or
        # the selection failed; surface that in the GUI.
        GUI.conversion_failure()
    else:
        converter.convert(GUI.genbank_file)
def _write_plots(self, plt):
    """Write *plt* to the EPS path from ``_get_epsfile()`` and convert to PNG.

    Fix: the original used Python-2 ``print`` statements, which are a syntax
    error under Python 3; both calls are single-argument, so ``print()``
    behaves identically on both interpreters.
    """
    import converter
    epsfile = self._get_epsfile()
    print(epsfile)
    plt.write_eps(epsfile)
    # converter.convert() produces the .png next to the .eps
    print(epsfile.replace('.eps', '.png'))
    converter.convert(epsfile, dpi=100)
def plot_trimmed(self, show=False):
    """Histogram zprimus for the full sample vs the trimmed (zconf=4) sample.

    Writes an EPS file next to the .rec output and converts it to PNG.
    """
    import converter
    binsize = 0.02
    bmin = 0.01
    bmax = 1.25
    psfile = self.outfile.replace('.rec','-comparez.eps')
    plt = biggles.FramedPlot()
    plt.xlabel = 'z'
    # full-sample redshift histogram
    hall = eu.stat.histogram(self.data['zprimus'], binsize=0.02,
                             min=bmin, max=bmax, more=True)
    phall=biggles.Histogram(hall['hist'], x0=hall['low'][0], binsize=binsize)
    phall.label = 'all'
    # trimmed (high-confidence, zconf=4) subset overlaid in red
    trimmed = self.get_trimmed()
    htrim = eu.stat.histogram(trimmed['zprimus'], binsize=0.02,
                              min=bmin, max=bmax, more=True)
    phtrim=biggles.Histogram(htrim['hist'], x0=htrim['low'][0],
                             binsize=binsize, color='red')
    phtrim.label = 'zconf = 4'
    key = biggles.PlotKey(0.6,0.9,[phall,phtrim])
    plt.add(phall, phtrim, key)
    if show:
        plt.show()
    plt.write_eps(psfile)
    converter.convert(psfile,dpi=90,verbose=True)
def convert_main(asmfile, verbose, stdout, opt):
    """Convert one .asm file, or every .asm inside a .zip archive.

    For a zip, extracts it next to itself and converts each contained .asm,
    counting successes and failures; errors are appended (as UTF-8 bytes) to
    *stdout*, which is a binary log stream, not sys.stdout. The stream is
    closed at the end and the console waits for Enter before exiting.
    """
    if zipfile.is_zipfile(asmfile):
        with zipfile.ZipFile(asmfile, "r") as z:
            # extract into a sibling directory named after the archive
            z.extractall(asmfile.replace(".zip", ""))
        directory = asmfile.replace(".zip", "")
        processed = 0
        errored = 0
        for file in glob.glob(f"{directory}/**/*.asm", recursive=True):
            try:
                if verbose:
                    print(f"Processing file {file}")
                converter.convert(file, opt, verbose, stdout)
                processed += 1
            except Exception as e:
                # log the failure but keep going with the remaining files
                stdout.write(
                    bytes(f"File {file} errored: {str(e)}\n\n\n", encoding="utf-8"))
                if verbose:
                    print(f"{file} generated exception {str(e)}")
                errored += 1
        print(f"Total processed files {processed}, errored files {errored}")
    else:
        try:
            print(f"Processing file {asmfile}")
            converter.convert(asmfile, opt, verbose, stdout)
        except Exception as e:
            stdout.write(
                bytes(f"File {asmfile} errored: {str(e)}\n\n\n", encoding="utf-8"))
            print(f"File {asmfile} errored: {str(e)}")
    stdout.close()
    input(
        f'{"Conversion details in log file" if verbose else "Silence mode was used"}\nPress enter to exit\n'
    )
def plot_radec(self, type):
    """
    ra/dec plot of all points and the matched points

    All points are drawn as black dots; matched points are overdrawn in
    red. The EPS is written into plotdir() and converted to PNG.
    """
    import biggles
    import converter
    from biggles import FramedPlot,Points
    print()
    dir=self.plotdir()
    if not os.path.exists(dir):
        os.makedirs(dir)
    psfile = self.plotfile(type)
    orig = self.read_original(type)
    mat = self.read_matched(type)
    plt=FramedPlot()
    symsize = 2
    if type == 'sdss' or type == 'other':
        # dense catalogs: shrink the dots so structure stays visible
        symsize = 0.25
    plt.add( Points(orig['ra'],orig['dec'],type='dot', size=symsize) )
    # matched points drawn second so they sit on top
    plt.add( Points(mat['ra'],mat['dec'],type='dot', color='red', size=symsize) )
    plt.xlabel = 'RA'
    plt.ylabel = 'DEC'
    print("Writing eps file:", psfile)
    plt.write_eps(psfile)
    converter.convert(psfile, dpi=120, verbose=True)
def plot_sheardiff_vs_field(self, nperbin, field, show=False):
    """Plot shear1/shear2 residuals vs *field* and return the PNG path.

    Produces a 1x2 table of binned-histogram panels (nperbin objects per
    bin), writes it to EPS, converts to PNG, and optionally shows it.
    """
    import biggles
    biggles.configure("default", "fontsize_min", 1.5)
    biggles.configure("_HalfAxis", "ticks_size", 2.5)
    biggles.configure("_HalfAxis", "subticks_size", 1.25)
    data = self.get_data()
    type = "vs_" + field
    epsfile = get_shear_compare_plot_url(self["run"], self["mock_catalog"], type)
    tab = biggles.Table(1, 2)
    # NOTE(review): these TeX labels appear to be missing a closing '$' —
    # confirm against the rendered plot before "fixing".
    names = {"shear1_diff": r"$\gamma_1-\gamma_1^{true}", "shear2_diff": r"$\gamma_2-\gamma_2^{true}"}
    if field == "shear_s2n":
        # s2n spans decades; use a log x axis
        xlog = True
    else:
        xlog = False
    plots = eu.plotting.bhist_vs(
        data, field, "shear1_diff", "shear2_diff",
        nperbin=nperbin, names=names, size=2.5, xlog=xlog, show=False
    )
    tab[0, 0] = plots[0]
    tab[0, 1] = plots[1]
    tab[0, 0].aspect_ratio = 1
    tab[0, 1].aspect_ratio = 1
    tab.write_eps(epsfile)
    converter.convert(epsfile, dpi=120, verbose=True)
    pngfile = epsfile.replace(".eps", ".png")
    if show:
        tab.show()
    return pngfile
def convert():
    """Flask view: convert an uploaded XML+MSG pair into a downloadable zip.

    The zip contains the converted message plus an integrity hash over it.
    On any failure the exception is flashed and the user is redirected back.
    """
    xml_source = request.files['xmlFile']
    msg_source = request.files['msgFile']
    if xml_source.filename == '' or msg_source.filename == '':
        flash('Please upload valid Xml and Msg files')
        return redirect(url_for('default'))
    msg_result = BytesIO()
    hsh_result = BytesIO()
    try:
        converter.convert(xml_source.stream, msg_source.stream, msg_result)
        # rewind so the hash is computed over the full converted payload
        msg_result.seek(0)
        hsh_result.write(converter.generate_integrity_hash(msg_result))
        msg_result.seek(0)
        hsh_result.seek(0)
        zip_stream = generate_zipfile(msg_result.getvalue(),
                                      hsh_result.getvalue(), 'result')
        # NOTE(review): attachment_filename was renamed download_name in
        # Flask 2.x — confirm the pinned Flask version.
        return send_file(zip_stream, attachment_filename='result.zip',
                         as_attachment=True)
    except Exception as ex:
        flash(type(ex).__name__ + ' - ' + str(ex.args))
        return redirect(url_for('default'))
def change_event(self, way=True):
    """Convert the value between the two currency entry widgets.

    way=True converts entry_from -> entry_to; way=False the reverse.
    Invalid numeric input highlights the source entry and aborts.
    """
    if way:
        target_entry_val = self.entry_from
        target_entry_res = self.entry_to
    else:
        target_entry_val = self.entry_to
        target_entry_res = self.entry_from
    from_text = target_entry_val.text()
    if not isfloat(from_text):
        # flag the bad input and bail out without converting
        target_entry_val.setStyleSheet(STYLE_ENTRY_BAD)
        return
    else:
        target_entry_val.setStyleSheet(STYLE_ENTRY_GOOD)
        target_entry_res.setStyleSheet(STYLE_ENTRY_GOOD)
    from_value = float(from_text)
    # combo items look like "CODE | description"; keep just the code
    from_code = self.combo_box_from.currentText().split(" | ")[0]
    to_code = self.combo_box_to.currentText().split(" | ")[0]
    if way:
        to_value = round(converter.convert(from_value, from_code, to_code), 10)
    else:
        # reverse direction: swap source and target currency codes
        to_value = round(converter.convert(from_value, to_code, from_code), 10)
    target_entry_res.setText(str(to_value))
def bark(self):
    """Announce the build with a timestamp, then run the converter."""
    stamp = datetime.datetime.now()
    print(f'[{stamp}] Woof~ Building template...', end=' \t', flush=True)
    converter.convert(self.path_of_files)
    print('Done!')
def main():
    """Run the print pipeline on the model file named on the command line."""
    # read the config file. We'll need this later.
    # also caches the file for stages
    # (configrc is unused here; kept for the caching side effect above)
    configrc = config.read_config()
    #load the logger
    plogger = logger.Logger('pipeline')
    if len(sys.argv) == 1:
        plogger.log("Error: no model given to pipeline")
        return
    infile = sys.argv[1]
    job = pipeline.PrintJob(infile, "*****@*****.**")
    # each stage sets the status, then runs; stages store files themselves
    job.status = 'converting'
    converter.convert(job)
    #replaced by slic3r automatic validation
    #job.status = 'validating'
    #validator.validate(job)
    job.status = 'slicing'
    slicer.slice(job)
    job.status = 'printing'
    printer.send_job(job)
def plot_bounds(doboss=False, **keys):
    """
    If procrun is set, a random subset of galaxies in the BOSS is plotted
    in black, and the ones i the BOSS footprint are in grey

    Draws the BOSS survey footprint (optional) plus the colored bound
    boxes; shows the plot, or writes EPS+PNG if keys['epsfile'] is given.
    """
    import biggles
    plt = biggles.FramedPlot()
    curves = []
    if doboss:
        f='~/masks/stomp-sdss/boss_survey.par'
        g = sdsspy.yanny.readone(f)
        i=0
        for b in g:
            # create curves in ra/dec space
            ra,dec = eu.plotting.transform_box(b['clambdaMin'], b['clambdaMax'],
                                               b['cetaMin'], b['cetaMax'],
                                               'sdss','eq')
            ra = eu.coords.shiftlon(ra,90)
            curve = biggles.Curve(ra,dec,color='grey')
            if i == 0:
                # label only the first curve so the key shows one BOSS entry
                curve.label = 'BOSS'
                curves.append(curve)
            plt.add(curve)
            i+=1
    colors=['red','cyan','cadetblue','blue','magenta','orange']
    i=0
    for t in sorted(bounds):
        b=bounds[t]
        # shift RA by 90 deg to keep footprints contiguous across RA=0
        ra0 = eu.coords.shiftlon(b['ra'][0], 90)[0]
        ra1 = eu.coords.shiftlon(b['ra'][1], 90)[0]
        box = eu.plotting.bbox(ra0,ra1,b['dec'][0],b['dec'][1],
                               color=colors[i], linewidth=3)
        box.label = t
        plt.add(box)
        curves.append(box)
        i+=1
    key = biggles.PlotKey(0.7,0.95, curves)
    plt.add(key)
    plt.xlabel = 'RA - 90'
    #plt.xlabel = 'RA'
    plt.ylabel = 'DEC'
    epsfile = keys.get('epsfile',None)
    if epsfile is None:
        plt.show()
    else:
        epsfile = os.path.expandvars(epsfile)
        epsfile = os.path.expanduser(epsfile)
        plt.write_eps(epsfile)
        import converter
        converter.convert(epsfile,dpi=100,verbose=True)
def write_eps_and_convert(plt, epsname, dpi=90):
    """Write *plt* to *epsname* (creating directories) and convert to PNG.

    Fix: the original used a Python-2 ``print`` statement, a syntax error
    under Python 3; replaced with the ``print()`` function.
    """
    import converter
    d = os.path.dirname(epsname)
    make_dirs(d)
    print('writing:', epsname)
    plt.write_eps(epsname)
    converter.convert(epsname, dpi=dpi)
def start(self):
    """Convert every listed file, advancing the progress bar per file."""
    files = self.list.getAllFiles()
    if files:
        destination = self.out_line_edit.text()
        step = 100 / len(files)
        for path in files:
            converter.convert(path, destination)
            self.progressBar.setValue(self.progressBar.value() + step)
        # reset once the batch is done
        self.progressBar.setValue(0)
def start(self):
    """Run the converter over all listed files, updating the progress bar."""
    files = self.list.getAllFiles()
    if files:
        destination = self.out_le.text()
        step = 100 / len(files)
        for path in files:
            converter.convert(path, destination)
            self.progressBar.setValue(self.progressBar.value() + step)
        # batch finished: return the bar to zero
        self.progressBar.setValue(0)
def ex1(self):
    """Run the converter with the selected command and line-edit arguments,
    then show a completion message box."""
    # argv mimics a CLI invocation: program name, command, then user inputs
    argv = ['ui', self.selectedCommand]
    argv.extend(lineEdit.text() for lineEdit in self.lineEditList)
    converter.convert(argv)
    # '작업 완료' is the user-facing text ("work complete"); kept as-is
    msg = QtWidgets.QMessageBox()
    msg.setWindowTitle('Notice')
    msg.setText('작업 완료')
    msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
    result = msg.exec_()
def wrapper_convert(path_to_json, path_to_xml, easy_mode=True, rect=None):
    """Run convert() on one json/xml pair, reporting the outcome.

    Returns 0 on success, 1 on failure (with the traceback printed).
    Fix: the bare ``except:`` also trapped KeyboardInterrupt/SystemExit;
    narrowed to ``Exception`` while keeping the same report-and-continue
    behavior.
    """
    print("Filename `%s`" % path_to_json)
    try:
        convert(path_to_json, path_to_xml, easy_mode, rect)
        print("OK")
    except Exception:
        print("FAILURE")
        traceback.print_exc()
        return 1
    return 0
def main():
    """Parse CLI arguments and run the log-to-csv conversion."""
    params = parse_args()
    if os.path.isdir(params.output):
        # a directory was given: write to '<dir>/csv' inside it
        params.output = os.path.join(params.output, 'csv')
    logging.getLogger().setLevel(logging.INFO)
    converter.convert(params.logs, params.struct, params.quests,
                      params.workshops, params.workshops_encoding,
                      params.quests_encoding, params.log_encoding,
                      params.delimiter, params.output)
def main():
    """Prompt for deposit parameters and print the result in each currency.

    The input()/print() strings are user-facing Russian text and are kept
    byte-identical.
    """
    rate = int(input("Введите процентную ставку: "))
    money = int(input("Введите сумму: "))
    period = int(input("Введите период ведения счета в месяцах: "))
    result = account.calculate_income(rate, money, period)
    # presumably converter.v enumerates the supported currencies and
    # convert(amount, i) returns (currency_name, converted_amount) —
    # TODO confirm against converter.py
    for i in range(len(converter.v)):
        a = converter.convert(money, i)
        b = converter.convert(result, i)
        print("Параметры счета ({0}):\n".format(a[0]),
              "Сумма ({0}): {1}".format(a[0], a[1]), "\n",
              "Ставка: ", rate, "\n",
              "Период: ", period, "\n",
              "Сумма на счете в конце периода ({0}): {1}".format(b[0], b[1]))
def _plot_radec(self, data, rdata, binnum):
    """
    plot the z hist for data, randoms, and weighted randoms

    Draws a subsample of randoms as dots with the lenses overlaid in red,
    writes the EPS and converts it to PNG.
    """
    import esutil as eu
    import biggles
    import converter
    pngfile=get_match_weights_file(self['lens_run'], self['rand_run'],
                                   self['bin_scheme'], binnum=binnum,
                                   ext='png')
    pngfile=pngfile.replace('weights.png','radec.png')
    epsfile=pngfile.replace('.png','.eps')
    title=self.binner.get_label(binnum)
    print(" writing:",pngfile)
    # cap the number of plotted randoms to keep the figure manageable
    nrand=215000
    if nrand > rdata.size:
        nrand=rdata.size
    #frac=0.1
    #nrand = int( rdata.size * frac )
    print(" plotting",nrand,"randoms")
    rind = eu.random.random_indices(rdata.size, nrand)
    plt=biggles.FramedPlot()
    plt.title=title
    plt.xlabel='RA'
    plt.ylabel='DEC'
    rpts=biggles.Points(rdata['ra'][rind], rdata['dec'][rind], type='dot')
    pts=biggles.Points(data['ra'], data['dec'],
                       type='filled circle', color='red', size=0.5)
    # randoms go on bottom
    plt.add(rpts, pts)
    # fake points give the key legible markers for both series
    fpts, frpts=eu.plotting.fake_points(['filled circle']*2,
                                        ['random','lenses'],
                                        colors=['black','red'])
    key=biggles.PlotKey(0.9,0.9, [fpts, frpts], halign='right')
    plt.add(key)
    print("writing:",epsfile)
    plt.write_eps(epsfile)
    converter.convert(epsfile, dpi=150, verbose=True)
def plot_fits(pars, samples, dolog=True, show=False, eps=None, par_labels=None):
    """Plot each parameter dimension against its samples in a panel grid.

    A special 1x2 linear/log layout is used when the first label is
    'fracdev'; otherwise one panel per dimension.

    Fixes:
    - ``par_labels[0]`` was indexed before the ``is not None`` check,
      crashing with the default ``par_labels=None`` even though None is
      explicitly handled in the else branch.
    - ``xrange`` and integer ``/`` (used as a table index) are Python-2
      only; ``range`` and ``//`` behave identically on Python 2 ints.
    """
    import esutil as eu
    import biggles
    import images
    biggles.configure('screen','width', 1400)
    biggles.configure('screen','height', 800)
    num = pars.shape[0]
    ndim = pars.shape[1]
    if par_labels is not None and par_labels[0] == 'fracdev':
        # fracdev: show the same dimension with linear and log y axes
        nrow = 1
        ncol = 2
        tab = biggles.Table(nrow, ncol)
        plin = _plot_single(pars[:,0], samples[:,0], do_ylog=False)
        plog = _plot_single(pars[:,0], samples[:,0], do_ylog=True)
        plin.xlabel = 'fracdev'
        plog.xlabel = 'fracdev'
        tab[0,0] = plin
        tab[0,1] = plog
    else:
        nrow, ncol = images.get_grid(ndim)
        tab = biggles.Table(nrow, ncol)
        for dim in range(ndim):
            plt = _plot_single(pars[:,dim], samples[:,dim], do_ylog=True)
            if par_labels is not None:
                plt.xlabel = par_labels[dim]
            else:
                plt.xlabel = r'$P_%s$' % dim
            row = dim // ncol   # floor division: table indices must be ints
            col = dim % ncol
            tab[row, col] = plt
    tab.aspect_ratio = nrow/float(ncol)
    if eps:
        import converter
        print(eps)
        d = os.path.dirname(eps)
        if not os.path.exists(d):
            os.makedirs(d)
        tab.write_eps(eps)
        converter.convert(eps, verbose=True, dpi=200)
    if show:
        tab.show()
def process(db_file, input_dir, output_dir):
    """Convert each video listed in the database into a titled mp3.

    Output files are named '<position>-<sanitized title>.mp3'.
    """
    for row in get_files(db_file):
        vid_id = row[0]
        position = row[2]
        clean_title = fix_title(row[1])
        source = input_dir + "/" + vid_id + ".mp4"
        target = output_dir + "/" + str(position) + "-" + clean_title + ".mp3"
        print(source, target)
        convert(source, target)
def start(self):
    """Convert every gathered image via ImageMagick, tracking progress."""
    recurse = self.subfolders_chb.isChecked()
    files = self.list.getImagesFromFolders(recurse)
    if files:
        magick_path = self.imagemagick_lb.text()
        target_ext = self.formatOut_cbox.currentText().lower()
        out_dir = self.out_le.text()
        # isChecked() already yields a bool, so use it directly
        overwrite = self.replace_rbtn.isChecked()
        step = 100 / len(files)
        for image in files:
            converter.convert(magick_path, image, target_ext, out_dir, overwrite)
            self.progressBar.setValue(self.progressBar.value() + step)
        self.progressBar.setValue(0)
def loadSample(B12, B13): a = tkinter.filedialog.askopenfilename(initialdir="/", title="Select file", filetypes=[("wav files", "*.wav"), ("mp3 files", "*.mp3")]) #print(a) global sampleFile global theFile if a.endswith(".wav") and len(a) > 4: sampleFile = a elif a.endswith(".mp3") and len(a) > 4: newName = a[:-4] + ".wav" convert(a, newName) sampleFile = newName print(sampleFile) B12.grid_forget() #App.getNewLabel(sampleFile, 6, 0) App.moveItem(B13, 3, 3) if (theFile != "" and theFile.endswith(".wav") and sampleFile != "" and sampleFile.endswith(".wav")): App.hide(B13) B14 = App.getNewButton("Tempo", 0, 3) B15 = App.getNewButton("Strength", 1, 3) B16 = App.getNewButton("Accuracy", 2, 3) assessment = Analysis(theFile, sampleFile) tempo = assessment.getTempo() strength = assessment.strength() accuracy = assessment.accuracy() averageScore = assessment.calculateAverage() label1 = App.getNewLabel("Tempo: ", 0, 4) label2 = App.getNewLabel("Strength: ", 1, 4) label3 = App.getNewLabel("Accuracy: ", 2, 4) label4 = App.getNewLabel("Average: ", 3, 4) label5 = App.getNewLabel(tempo, 0, 5) label6 = App.getNewLabel(strength, 1, 5) label7 = App.getNewLabel(accuracy, 2, 5) label8 = App.getNewLabel(averageScore, 3, 5) B14['command'] = lambda: messagebox.showinfo("Tempo", assessment.comment()) B15['command'] = lambda: assessment.strengthGraph() B16['command'] = lambda: assessment.accuracyGraph() B17 = App.getNewButton("Clear", 0, 6) B17['command'] = lambda: clear(B13, B14, B15, B16, B17, label1, label2, label3, label4, label5, label6, label7, label8)
def main():
    """Poll mail for print jobs and run each through the pipeline, forever."""
    # read the config file. We'll need this later.
    # also caches the file for stages
    configrc = config.read_config()
    udb = users.UserDB()
    # set up mailfetch. Just fetches the password for now.
    # v option is temporary, and prevents pipeline from flooding console
    # should eventually be replaced by proper logging system
    plogger = logger.Logger('pipeline')
    mailfetch.initialize()
    while True:
        try:
            poll = mailfetch.poll()
            # mailfetch.poll gets list of printjobs to work on
            for job in poll:
                email = job.sender
                '''user = udb.find_user(email)
                if user != None:
                    user.jobs_submitted += 1
                else:
                    user = users.User(email, 1)
                    udb.add_user(user)'''
                #pipeline goes here
                #each step of the pipeline sets the status and then runs the stage
                #the stage should store a new file if one is created, but nothing else.
                job.status = 'converting'
                converter.convert(job)
                #job.status = 'validating'
                #validator.validate(job)
                job.status = 'slicing'
                slicer.slice(job)
                job.status = 'printing'
                printer.send_job(job)
        except TypeError:
            # NOTE(review): presumably poll() returns None when there is
            # nothing to do, and iterating None raises TypeError, ending the
            # loop — confirm this exit path is intentional.
            return
        # wait a while. This lets the computer do something else
        delay_time = float(configrc['Pipeline']['poll_frequency'])
        time.sleep(delay_time)
def test_board_general(self):
    """Feature extraction on a hand-built N x N board (values 1/2 are the
    two players' stones) must match the expected feature vector."""
    board = [[0 for y in range(N)] for x in range(N)]
    board[1][0] = 1
    # NOTE(review): duplicate of the previous assignment — possibly a typo
    # for a different cell; confirm the intended fixture.
    board[1][0] = 1
    board[2][0] = 1
    board[1][1] = 1
    board[1][2] = 1
    board[0][3] = 2
    board[1][3] = 2
    board[2][3] = 2
    board[3][3] = 1
    board[1][4] = 2
    board[1][5] = 1
    board[1][6] = 1
    board[1][7] = 1
    board[2][4] = 2
    board[2][5] = 2
    board[2][6] = 2
    board[2][7] = 2
    board[2][8] = 1
    board[2][2] = 1
    board[1][9] = 2
    board[2][10] = 1
    board[3][11] = 1
    board[4][12] = 1
    board[5][13] = 1
    board[6][14] = 2
    print(np.matrix(board))
    calculated_features = convert(board)
    expected_features = [3, 1, 1, 1, 0, 0, 0, 3, 0, 0, 1, 0, 0, 1]
    npt.assert_array_equal(calculated_features, expected_features)
def analize_model(term_list, config, output):
    """Create the model description (weight matrices + tf/idf) for the terms,
    or delegate to query() when config['is_query'] is set."""
    if config['is_query']:
        return query(term_list, config, output)
    config['terms'] = term_list
    printjson(config, config['current'])
    docs = []
    for f in os.scandir(config['path']):
        docs.append(convert(f))
    t_len = len(term_list)
    # second scandir pass just counts the documents
    d_len = len([x for x in os.scandir(config['path'])])
    # hidden-layer sizing heuristic: grow with the vocabulary but shrink
    # drastically for very large term lists
    hidden_1 = 2 * t_len if t_len <= 100 else int(t_len / 50)
    hidden_2 = hidden_1 if hidden_1 <= 10 else int(hidden_1 / 2)
    result = {
        'action': 'create',
        'path': config['path'],
        'data': [
            matrix(hidden_1, t_len),
            matrix(hidden_2, hidden_1),
            matrix(d_len, hidden_2)
        ],
        'tf': [tf(docs, x, t_len) for x in term_list],
        'idf': [idf(docs, x) for x in term_list]
    }
    printjson(result, output)
def add_song(file_name, path):
    """Add a song to the database, converting it to mp3 first if needed.

    Returns True on success, False when the file has no valid MP3 header.
    NOTE(review): a failed conversion returns bare None rather than False —
    confirm callers treat both the same.
    """
    # Convert if not mp3
    if not file_name.endswith(".mp3"):
        if not converter.convert(
                path,
                os.path.join(storage.MUSIC_TEMP_STORAGE,
                             os.path.splitext(file_name)[0] + '.mp3')):
            return
        # TODO: check if need to remove old file
        path = os.path.join(storage.MUSIC_TEMP_STORAGE,
                            os.path.splitext(file_name)[0] + '.mp3')
    # Read tags if necessary
    # if correct_tags:
    #     checker.correct_tags(file_path)
    try:
        file = MP3(path)
    except HeaderNotFoundError:
        return False
    # Add Song to database
    divided_name = checker.divide_name(file_name)
    database.add_song(title=divided_name[0],
                      artist=divided_name[1],
                      path=path,
                      duration=int(file.info.length * 1000),
                      status=u'added')
    return True
def test_board_issue4(self):
    """Regression test for issue #4: feature extraction on an explicit
    15x15 board literal must match the expected feature vector."""
    board = [
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 2, 0, 0, 2, 0, 0, 0, 0, 0],
        [0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 2, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 2],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 2, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0],
        [2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]
    ]
    print(np.matrix(board))
    calculated_features = convert(board)
    expected_features = [7, 0, 6, 1, 1, 0, 1, 1, 2, 0, 0, 0, 0, 0]
    npt.assert_array_equal(calculated_features, expected_features)
def run_converter(): ''' Reads input and runs functions from converter.py ''' # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Хочу попробовать регулярки, чтобы исключить ошибки с введением без пробелов, # или нескольких пробелов. Цифра обязательна (группа 1), валюта # опциональна (группа 2). # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ while True: input_str = input("Enter amount and currency (BYN, EUR, USD, or RUB):\n") match = re.match(r'([0-9]+)\s*([a-zA-Z]{3})?', input_str) if match: moneyz = float(match.group(1)) if match.group(2) and match.group(2).upper() in ('BYN', 'USD', 'EUR', 'RUB'): currency = match.group(2).upper() break else: print( 'No correct currency entered.\n' 'Showing results for BYN:\n' ) currency = 'BYN' break else: print('Please correct amount and currency:\n') continue rates = rates_request(nbrb_url) to_table(convert(moneyz, rates, currency))
def index():
    """Flask view: accept a CSV upload, convert it to ICS, store it in GCS.

    On a valid POST the converted calendar is written under a fresh UUID key
    and the user is redirected to the success page; conversion errors are
    flashed. Form validation errors fall through to the flash loop below.
    """
    form = UploadForm()
    if request.method == 'POST' and form.validate_on_submit():
        bucket_name = app.config['BUCKET_NAME']
        # fresh UUID doubles as the download key stored in the session
        key = uuid.uuid4()
        filename = str(key) + '.ics'
        fullpath = '/' + bucket_name + '/' + filename
        upfile = request.files['csv_file']
        try:
            ics_file = converter.convert(upfile)
        except (ContentError, HeadersError, DatetimeFormatError) as error:
            flash(error, 'danger')
            return render_template('index.html', form=form, links=base_links,
                                   links_title=links_title)
        else:
            mtype = 'text/calendar'
            with gcs.open(fullpath, 'w', content_type=mtype) as w:
                w.write(ics_file)
            session['key'] = key
            return redirect(url_for('success'))
    # GET, or failed validation: surface every form error to the user
    for field, errors in form.errors.items():
        for error in errors:
            msg = "Whups! {}".format(error)
            flash(msg)
    return render_template('index.html', form=form, links=base_links,
                           links_title=links_title)
def asNormalizedUnsignedByteArrayImg(interval, invert, blockRadius, n_bins, slope, matrices, copy_threads, index, imp): sp = imp.getProcessor() # ShortProcessor sp.setRoi(interval.min(0), interval.min(1), interval.max(0) - interval.min(0) + 1, interval.max(1) - interval.min(1) + 1) sp = sp.crop() if invert: sp.invert() CLAHE.run( ImagePlus("", sp), blockRadius, n_bins, slope, None ) # far less memory requirements than NormalizeLocalContrast, and faster. minimum, maximum = autoAdjust(sp) # Transform and convert image to 8-bit, mapping to display range img = ArrayImgs.unsignedShorts( sp.getPixels(), [sp.getWidth(), sp.getHeight()]) sp = None affine = AffineTransform2D() affine.set(matrices[index]) imgI = Views.interpolate(Views.extendZero(img), NLinearInterpolatorFactory()) imgA = RealViews.transform(imgI, affine) imgT = Views.zeroMin(Views.interval(imgA, img)) imgMinMax = convert(imgT, RealUnsignedByteConverter(minimum, maximum), UnsignedByteType) aimg = ArrayImgs.unsignedBytes(Intervals.dimensionsAsLongArray(img)) ImgUtil.copy(ImgView.wrap(imgMinMax, aimg.factory()), aimg, copy_threads) img = imgI = imgA = imgT = imgMinMax = None return aimg
def do_llvm_compile(args):
    """Parse the target function's own source, infer types, and JIT it to an
    optimized LLVM function.

    Returns (llvm_function, return_type), or (None, None) when the body
    references a function whose type cannot be determined. Python-2 code
    (print statements, func_code) — kept as-is.
    """
    if debug_output:
        print '****'
        print 'doing llvm compile'
        print '****'
    # re-read the defining source file to recover the function body text
    fname = target_f.func_code.co_filename
    ff = open(fname,'r')
    lines = [l for l in ff.readlines()]
    ff.close()
    lineno = target_f.func_code.co_firstlineno
    endlineno = lineno + find_end(lines[lineno:])
    an = ast.parse(''.join(lines[lineno:endlineno]))
    cvt = convert(args=args, env=target_f.__globals__)
    cvt.determine_types(an)
    if cvt._unknown_function_present:
        # cannot compile when an unresolvable function is referenced
        return None, None
    if debug_output:
        dump(an)
    e = cvt(an)
    if debug_output:
        print 'e = ',e
    e.verify()
    g_llvm_pass_manager.run(e)
    if debug_output:
        print 'after opt = ',e
    # cache the inferred return type for later calls
    func_ret_types[name] = cvt._return_type
    return e,cvt._return_type
class MyTestCase(unittest.TestCase):
    """Tests for grayscale conversion, density layers and singularity bounds.

    The fixture image is loaded and processed once, at class-definition
    time, and shared by all test methods.
    """
    inputDirectory = "C:\Pictures"
    inputFile = "2.jpg"
    path = os.path.join(inputDirectory, inputFile)
    img = cv2.imread(path, 1)
    converted_image = converter.convert(img, converter_types.ConverterType.GrayScaleMid.value)
    windows = np.array([2, 3, 4, 5, 7])
    layers_builder = LayersBuilder(converted_image, windows)
    Densities = layers_builder.calculate_density()
    singularity_bounds = layers_builder.get_singularity_bounds(Densities)

    def test_intensity_converter(self):
        """Spot-check the four corners of the converted grayscale image."""
        self.assertTrue(self.converted_image[0, 0] == 27)
        self.assertTrue(self.converted_image[209, 0] == 97)
        self.assertTrue(self.converted_image[209, 207] == 189)
        self.assertTrue(self.converted_image[0, 207] == 187)

    def test_calculate_density(self):
        """Spot-check density values at several pixels."""
        # window = np.array([2, 3, 4, 5, 7]) # densities with max window size
        # x = np.log(2 * window + 1); y = np.log(intensity + 1)
        self.assertTrue(math.isclose(self.Densities[0, 0], 1.955013, rel_tol=1e-5))
        self.assertTrue(math.isclose(self.Densities[195, 0], 3.06910, rel_tol=1e-5))
        self.assertTrue(math.isclose(self.Densities[195, 193], 2.47862, rel_tol=1e-5))
        self.assertTrue(math.isclose(self.Densities[0, 193], 2.26545, rel_tol=1e-5))
        self.assertTrue(math.isclose(self.Densities[134, 146], 1.65610, rel_tol=1e-5))
        self.assertTrue(math.isclose(self.Densities[108, 140], 2.01141, rel_tol=1e-5))

    def test_max_min_density(self):
        """The singularity interval endpoints must match the expected extremes."""
        self.assertTrue(math.isclose(self.singularity_bounds.begin, 0.76878, rel_tol=1e-5))
        self.assertTrue(math.isclose(self.singularity_bounds.end, 3.93179, rel_tol=1e-5))
def process_message(self, data):
    """Slack handler: '@bot run' executes SlackMojicode and posts its output;
    '@bot convert' converts the script and posts the result.

    Python-2 code (print statement) — kept as-is.
    """
    intr = interpreter.Interpreter()
    if data['text'].startswith('<@{}> run'.format(self.user_id)):
        # strip the mention/command prefix, leaving just the script
        script = data['text'].replace('<@{}> run'.format(self.user_id), '')
        script = script.strip()
        # NOTE(review): these replaces are no-ops as written — presumably the
        # originals un-escaped Slack's '&lt;'/'&gt;' entities and the source
        # was mangled in transit; confirm against upstream.
        script = script.replace('<', '<')
        script = script.replace('>', '>')
        out = ""
        print script
        try:
            intr.compile_interpret(parser.parse(script))
            out_list = intr.get_stdout()
            if len(out_list) > 0:
                out = '\n```{}```'.format('\n'.join(out_list))
            else:
                out = 'No output'
        except Exception as e:
            out += "\n\nError has occured while excecuting SlackMojicode:\n```{}: {}```" \
                .format(type(e), e)
            # NOTE(review): re-raising here means the error text built above
            # is never appended to outputs — confirm this is intended.
            raise e
        self.outputs.append([data['channel'], '\n' + out])
    if data['text'].startswith('<@{}> convert'.format(self.user_id)):
        script = data['text'].replace('<@{}> convert'.format(self.user_id), '')
        script = script.strip()
        # NOTE(review): same suspected lost '&lt;'/'&gt;' un-escaping as above
        script = script.replace('<', '<')
        script = script.replace('>', '>')
        script = converter.convert(script)
        self.outputs.append([data['channel'], script])
def post(self):
    """Handle POST: pass the JSON payload to convert() and return its result."""
    try:
        values = request.json
        return convert(collection, values)
    except Exception as e:
        print(e)
        # NOTE(review): exit() terminates the entire process on any error in a
        # request handler — confirm this is intended rather than returning an
        # error response.
        exit()
def save_frame(__frame, message_uint8, stream, args):
    """Save a finished frame as PNG and, optionally, decode and save the
    audio embedded in its bit plane as WAV."""
    if args.wait:
        pass  # TODO check if a key (e.g. space) was pressed to save this finished frame
    # "channels_samplerate_bitplane_YYYYmmdd-HHMMSS"
    fname = '_'.join([
        str(int(stream._channels)),
        str(int(stream._samplerate)),
        str(args.bit_plane),
        time.strftime('%Y%m%d-%H%M%S')
    ])
    fname = os.path.join(args.output_folder, fname)
    cv2.imwrite(filename=fname + ".png", img=__frame)
    if args.verbose:
        print(f"Saved image to '{fname}.png'")
    if args.save_audio:
        # recover the audio hidden in the selected bit plane of the frame
        decoded_audio = decode(__frame, args.bit_plane)
        decoded_audio = convert(decoded_audio, to='int16')
        if stream._channels == 2:
            pass  # TODO convert decoded_audio to a 2D array if it's stereo
        wavfile.write(filename=fname + ".wav",
                      rate=int(stream._samplerate),
                      data=decoded_audio)
        if args.verbose:
            print(f"Saved audio to '{fname}.wav'")
def read_html(self, path):
    """Read the HTML file at *path* and return its converted contents.

    Fix: removed the explicit ``f.close()`` — the ``with`` block already
    closes the file, so the call was redundant.
    """
    with open(path, "r") as f:
        resp = f.read()
    return convert(resp)
def usd2rub():
    """Convert usd to rub"""
    raw = request.args.get('value')
    try:
        amount = float(raw)
    except (ValueError, TypeError):
        # missing parameter (None) or non-numeric text
        return jsonify({'error': 'wrong value type'})
    return jsonify({'result': convert(amount)})
def recurse(body, domain, counter):
    """Try run() against successive subdomains until one succeeds or the
    subdomain list is exhausted.

    Uses module-level filename, c_type, subdomain and subject. A file input
    takes priority over a message body; plain-text and HTML bodies are
    converted before being run.
    """
    data = get_data(domain)
    train = GPTTrain(data, c_type, domain=domain)
    if filename != None:
        response, status = run(filename, train, domain)
        print(response)
        if status == "fail":
            if counter < len(subdomain) - 1:
                # advance to the next subdomain and retry
                counter += 1
                domain = subdomain[counter]
                recurse(body, domain, counter)
            else:
                pass
        else:
            pass
    elif body != None:
        if c_type == "text/plain":
            body = convert(body)
            response, status = run(body, train, domain)
            if status == "fail":
                if counter < len(subdomain) - 1:
                    counter += 1
                    domain = subdomain[counter]
                    recurse(body, domain, counter)
                else:
                    pass
            else:
                print(response)
        elif c_type == "text/html":
            body = convert(body)
            # HTML bodies get the subject appended with a '$$' separator
            body = body + ' $$ ' + subject
            response, status = run(body, train, domain)
            if status == "fail":
                if counter < len(subdomain) - 1:
                    counter += 1
                    domain = subdomain[counter]
                    recurse(body, domain, counter)
                else:
                    pass
            else:
                print(response)
def compare_all_other(self, type, show=True): fdict=self.all_other_fdict(type) # this is the original file. It has the redshifts orig = zphot.weighting.read_training(fdict['origfile']) # this is the outputs num = zphot.weighting.read_num(fdict['numfile1']) # this is the weights file weights = zphot.weighting.read_training(fdict['wfile2']) # recoverable set w_recoverable = where1(num['num'] > 0) # this is actually the indexes back into the "orig" file w_keep = num['photoid'][w_recoverable] # get the z values for these validation objects zrec = orig['z'][w_keep] binsize=0.0314 valid_dict = histogram(zrec, min=0, max=1.1, binsize=binsize, more=True) plt=FramedPlot() vhist = valid_dict['hist']/(float(valid_dict['hist'].sum())) pvhist=biggles.Histogram(vhist, x0=valid_dict['low'][0], binsize=binsize) pvhist.label = 'truth' weights_dict = histogram(weights['z'], min=0, max=1.1, binsize=binsize, weights=weights['weight'], more=True) whist = weights_dict['whist']/weights_dict['whist'].sum() pwhist=biggles.Histogram(whist, x0=weights_dict['low'][0], binsize=binsize, color='red') pwhist.label = 'weighted train' key = PlotKey(0.6,0.6,[pvhist,pwhist]) plt.add(pvhist,pwhist,key) plt.add( biggles.PlotLabel(.8, .9, type) ) plt.write_eps(fdict['zhistfile']) converter.convert(fdict['zhistfile'],dpi=90,verbose=True) if show: plt.show()
def main():
    """Parse arguments and perform the computation."""
    # Parse arguments
    parser = argparse.ArgumentParser()
    parser.description = "Compute approximate betweenness centrality of all vertices in a graph using the algorihm by Brandes and Pich, and the time to compute them, and write them to file"
    parser.add_argument("epsilon", type=util.valid_interval_float,
                        help="accuracy parameter")
    parser.add_argument("delta", type=util.valid_interval_float,
                        help="confidence parameter")
    parser.add_argument("graph", help="graph file")
    parser.add_argument("output", help="output file")
    parser.add_argument("-m", "--maxconn", action="store_true", default=False,
                        help="if the graph is not weakly connected, only save the largest connected component")
    parser.add_argument("-p", "--pickle", action="store_true", default=False,
                        help="use pickle reader for input file")
    parser.add_argument("-s", "--samplesize", type=util.positive_int, default=0,
                        help="use specified sample size. Overrides epsilon, delta, and diameter computation")
    parser.add_argument("-t", "--timeout", type=util.positive_int, default=3600,
                        help="Timeout computation after specified number of seconds (default 3600 = 1h, 0 = no timeout)")
    parser.add_argument("-u", "--undirected", action="store_true", default=False,
                        help="consider the graph as undirected ")
    parser.add_argument("-v", "--verbose", action="count", default=0,
                        help="increase verbosity (use multiple times for more verbosity)")
    parser.add_argument("-w", "--write", nargs="?", default=False, const="auto",
                        help="write graph (and computed attributes) to file.")
    args = parser.parse_args()
    # Set the desired level of logging
    util.set_verbosity(args.verbose)
    # Read graph
    if args.pickle:
        G = util.read_graph(args.graph)
    else:
        G = converter.convert(args.graph, not args.undirected, args.maxconn)
    # Compute betweenness
    if args.samplesize:
        # explicit sample size overrides the epsilon/delta computation
        (stats, betw) = betweenness_sample_size(G, args.samplesize,
                                                args.write, args.timeout)
    else:
        (stats, betw) = betweenness(G, args.epsilon, args.delta,
                                    args.write, args.timeout)
    # If specified, write betweenness as vertex attributes, and time as graph
    # attribute back to file
    if args.write:
        logging.info("Writing betweenness as vertex attributes and stats as graph attribute")
        if args.write == "auto":
            # derive the output name from the input, tagging directedness
            filename = os.path.splitext(args.graph)[0] + ("-undir" if args.undirected else "dir") + ".picklez"
            G.write(filename)
        else:
            G.write(args.write)
    # Write stats and betweenness to output
    util.write_to_output(stats, betw, args.output)
def importconv():
    """Convert the current display value and toggle the DEG/RAD indicator.

    Both `display` and `deg` are module-level state shared with the rest of
    the calculator UI; converter.convert returns the updated pair.
    """
    global display
    global deg
    display,deg = converter.convert(display,deg)
    if deg == True:
        button_deg.configure(fg='purple',text='DEG')
    elif deg == False:
        button_deg.configure(text="RAD" , fg='DodgerBlue3')
def scan(path, output):
    """Concatenate the converted text of every regular file directly under
    *path* and feed the result to terms().

    Errors are reported and swallowed (best-effort scan), as before.
    Fixes: the string was built with ``+`` in a loop (quadratic) — now
    collected and joined once; dropped the redundant ``pass`` after print.
    """
    try:
        pieces = []
        for f in [x for x in os.scandir(path) if os.path.isfile(x)]:
            pieces.append(convert(f))
        plain = ''.join(pieces)
        terms(plain, output)
    except Exception as e:
        print(e)
def plot_fits(pars, samples, comps, dolog=True, show=False, eps=None, par_labels=None):
    """Plot each parameter dimension against its samples in a panel grid,
    optionally writing EPS (plus PNG via converter) and/or showing it.

    Fix: ``xrange`` and integer ``/`` (used as a table index) are Python-2
    only; ``range`` and ``//`` are drop-in equivalents on Python-2 ints and
    also work under Python 3.
    """
    import esutil as eu
    import biggles
    import images
    biggles.configure('screen','width', 1400)
    biggles.configure('screen','height', 800)
    num = pars.shape[0]
    ndim = pars.shape[1]
    nrow, ncol = images.get_grid(ndim)
    tab = biggles.Table(nrow, ncol)
    for dim in range(ndim):
        plt = _plot_single(pars[:,dim], samples[:,dim], comps, do_ylog=True)
        if par_labels is not None:
            plt.xlabel = par_labels[dim]
        else:
            plt.xlabel = r'$P_%s$' % dim
        row = dim // ncol   # floor division: table indices must be ints
        col = dim % ncol
        tab[row, col] = plt
    tab.aspect_ratio = nrow/float(ncol)
    if eps:
        import converter
        print(eps)
        d = os.path.dirname(eps)
        if not os.path.exists(d):
            os.makedirs(d)
        tab.write_eps(eps)
        converter.convert(eps, verbose=True, dpi=200)
    if show:
        tab.show()
def plot_z_radius(self):
    """Scatter void radius vs redshift, marking the four VoidZBinner bins
    with vertical boundary lines and per-bin point colors; write EPS+PNG."""
    import biggles
    import converter
    data=self.read_original()
    plt=eu.plotting.bscatter(data['z'],data['radius'],show=False)
    zb = binning.VoidZBinner(4)
    ll, hl = zb.bin_ranges()
    # vertical lines at the bin edges, spanning radius 6..60
    c1=biggles.Curve([ll[0]]*2, [6,60])
    c2=biggles.Curve([ll[1]]*2, [6,60])
    c3=biggles.Curve([ll[2]]*2, [6,60])
    c4=biggles.Curve([ll[3]]*2, [6,60])
    c5=biggles.Curve([hl[3]]*2, [6,60])
    plt.add(c1,c2,c3,c4,c5)
    # indices of the points falling in each of the four z bins
    w1=where1( (data['z'] > ll[0]) & (data['z'] < hl[0]) )
    w2=where1( (data['z'] > ll[1]) & (data['z'] < hl[1]) )
    w3=where1( (data['z'] > ll[2]) & (data['z'] < hl[2]) )
    w4=where1( (data['z'] > ll[3]) & (data['z'] < hl[3]) )
    type='filled circle'
    size=1
    plt.add(biggles.Points(data['z'][w1],data['radius'][w1],
                           color='blue',type=type,size=size))
    plt.add(biggles.Points(data['z'][w2],data['radius'][w2],
                           color='red',type=type,size=size))
    plt.add(biggles.Points(data['z'][w3],data['radius'][w3],
                           color='magenta',type=type,size=size))
    plt.add(biggles.Points(data['z'][w4],data['radius'][w4],
                           color='brown',type=type,size=size))
    plt.xlabel='z'
    plt.ylabel='radius'
    #plt.show()
    f=self.original_file().replace('.fits','-z-rad.eps')
    print(f)
    plt.write_eps(f)
    converter.convert(f, dpi=100, verbose=True)
def plotData(self, dataString):
    """Build a palette-mode PIL image from raw satellite data.

    dataString : raw pixel data (dimensions given by self.dataDimension,
    which MTSAT-2 supplies as (X-points, Y-points)).  The data is mapped
    through self.lookupTable by the converter, wrapped into a 'P'-mode
    image, and given the instrument color palette.

    Returns the PIL Image.
    """
    # NOTE(review): the original computed dataSize = X * Y here but never
    # used it; removed as dead code.
    imgPStr = converter.convert(self.lookupTable,
                                self.dataDimension,
                                dataString)
    imgP = Image.fromstring('P', self.dataDimension, imgPStr)
    imgP.putpalette(self.colorScale.PALETTE)
    return imgP
def ToMathematica(infile, outfile=""):
    """ToMathematica(infile, [outfile=""])

    converts the specified model file to legacy Cellerator Mathematica
    notebook.  If no output name is given, 'translated-model.nb' is used.
    Returns whatever converter.convert returns (the written file name).
    """
    if len(outfile) == 0:
        outfile = "translated-model.nb"
    # Uniquify the output name so an existing notebook is not clobbered.
    out = utils.uniqueFileName(outfile, type="nb")
    # BUG FIX: the original passed the raw `outfile` here, silently
    # discarding the unique name it had just computed.
    fout = converter.convert(infile, out)
    return fout
def write_plot(plt, show=False, eps=None, png=None,
               width=800, height=800, convert=False, dpi=90, **keys):
    """Display and/or write out a plot object.

    If convert=True and the eps keyword is sent, the eps is written and
    then run through the converter (eps -> png).  Otherwise, if png is
    sent, a png of size width x height is written directly.
    """
    if show:
        plt.show()

    have_eps = eps is not None
    if have_eps:
        print(eps)
        plt.write_eps(eps)

    if have_eps and convert:
        import converter
        converter.convert(eps, dpi=dpi, verbose=True)
    elif png is not None:
        print(png)
        plt.write_img(width, height, png)
def write(self): import converter pstr=[str(s) for s in self.psfnums] pstr='-'.join(pstr) extra=self.get_epsfile_extra() path=files.get_summary_plot_path(ftype='shear', run=self.run, shnum=self.shnum, psfnum=pstr, extra=extra) pngpath=path.replace('.eps','.png') dir=os.path.dirname(path) if not os.path.exists(dir): try: os.makedirs(dir) except: pass print 'writing:',path self.plt.write_eps(path) print 'writing:',path.replace('.eps','.png') converter.convert(path, dpi=90)
def main():
    """Parse arguments, call the approximation, write it to file."""
    # ---- command line --------------------------------------------------
    parser = argparse.ArgumentParser()
    parser.description = "Compute an approximation of the diameter of a graph and the time needed to compute it, and (if specified) write these info as a graph attributes"
    parser.add_argument("graph", help="graph file")
    parser.add_argument("-i", "--implementation", choices=["homegrown", "igraph"],
                        default="homegrown",
                        help="use specified implementation of betweenness computation")
    parser.add_argument("-m", "--maxconn", action="store_true", default=False,
                        help="if the graph is not weakly connected, only save the largest connected component")
    parser.add_argument("-p", "--pickle", action="store_true", default=False,
                        help="use pickle reader for input file")
    parser.add_argument("-u", "--undirected", action="store_true", default=False,
                        help="consider the graph as undirected ")
    parser.add_argument("-v", "--verbose", action="count", default=0,
                        help="increase verbosity (use multiple times for more verbosity)")
    parser.add_argument("-w", "--write", action="store_true", default=False,
                        help="write the approximation of diameter of the graph as the 'approx_diameter' graph attribute and the time taken to compute it as the 'approx_diam_time' attribute")
    args = parser.parse_args()

    # logging level and RNG seeding
    util.set_verbosity(args.verbose)
    random.seed()

    # ---- read the graph ------------------------------------------------
    if args.pickle:
        G = util.read_graph(args.graph)
    else:
        # converter reads from file; second arg is "directed?"
        G = converter.convert(args.graph, not args.undirected, args.maxconn)

    # ---- compute and report --------------------------------------------
    (elapsed_time, diam) = diameter(G, args.implementation)
    print("{}, diameter={}, time={}".format(args.graph, diam, elapsed_time))

    # If requested, add graph attributes and write the graph back to the
    # original file.  We use format auto-detection, which should work
    # given that it worked when we read the file.
    # NOTE(review): the help text above says 'approx_diameter' but the
    # attribute actually set is 'approx_diam' — confirm the intended name.
    if args.write:
        logging.info("Writing diameter approximation and time to graph")
        G["approx_diam"] = diam
        G["approx_diam_time"] = elapsed_time
        G.write(args.graph)
def generate_random_weights(path, pickle, undirected):
    """Write one uniform-random weight per edge of the graph at *path*.

    The weights go to '<path minus its 4-char extension>_weights.txt',
    one ``repr(random.random())`` per line.  Returns 0.

    Parameters
    ----------
    path : str
        Graph file; also determines the output file name.
    pickle : bool
        If true, read with util.read_graph; otherwise run the converter.
        (Parameter name shadows the stdlib module but is part of the
        public interface, so it is kept.)
    undirected : bool
        Negated and passed to converter.convert as the "directed" flag.
    """
    if pickle:
        G = util.read_graph(path)
    else:
        G = converter.convert(path, not undirected, False)
    print(G.ecount())

    txt_dir = path[:-4] + "_weights.txt"
    # Context manager guarantees the file is closed even on error; the
    # original's initial write("") was a no-op and has been dropped.
    with open(txt_dir, 'w') as out:
        for _ in range(G.ecount()):
            out.write(repr(random.random()) + '\n')
    return 0
def main():
    """Run one simulated pipeline pass for a fixed product.

    Resolves the data URL for the hard-coded field/type/sensor/collection/
    product, and if the remote file has been updated, downloads it,
    converts it to HDF and tiles the result.  Does nothing otherwise.
    """
    field = "Land"
    dtype = "Temperature"
    sensor = "MODIS"
    collection = "AS3"
    product = 2

    if not os.path.exists(tomcat_dir):
        os.makedirs(tomcat_dir)

    # Simulate a database query
    url = extractURL(field, dtype, sensor, collection, product)

    if updated(url):
        dlFile = download(url)
        abs_path = convert(dlFile, "HDF")
        makeTiles(abs_path)
    # no else branch needed: nothing to do when the source is unchanged
    # (the original had a redundant `else: pass`)
def hero_purchase(self):
    """Ask the hero which stocked item he wants and try to sell it to him."""
    self.show_inventory()
    print("\"Which would ye like to purchase?\"")

    # converter.convert returns False (printing its own message) when the
    # name does not correspond to a real item.
    item = converter.convert(prompt(""))
    if item == False:
        return

    if item not in self.inventory:
        # Real item, but the shop does not carry it.
        print("\a\"Sorry I haven't got that in stock.\"")
    elif item.price <= self.hero.coins:
        # In stock and affordable: hand it over and take payment.
        self.hero.inventory.append(item)
        self.hero.coins -= item.price
        print("\"Nice doing business with you, sir!\"")
    else:
        # In stock but too expensive for the hero's purse.
        print("\a\"You can't afford that!\"")
def search_similar(file_name):
    """Fingerprint *file_name* and report up to five most-similar tracks.

    Returns a list of human-readable strings (each also printed), or
    [u'not found'] when nothing matched.
    """
    converted = converter.convert(file_name, cf.FOLDER_BOT)
    fps = fingerprint.get_fingerprint_for_short(converted, save=True)
    # the converted temp copy is no longer needed once fingerprinted
    os.remove(os.path.join(cf.FOLDER_TEMP, converted))

    # Accumulate per-track match counts over every band of every fingerprint.
    votes = dict()
    for fp in fps:
        for band in fp:
            matches = HEngine.find_similar_daemon_hengine(band)
            for track_id, count in matches.items():
                if votes.get(track_id):
                    votes[track_id] += count
                else:
                    votes[track_id] = count

    ans = []
    if not votes:
        ans.append(u'not found')
        return ans

    tracks = db.get_track_names(votes.keys())
    tracks = {
        track.id: [0, track.author, track.name]
        for track in tracks
    }
    for track_id, count in votes.items():
        tracks[track_id][0] = count

    info = "Total candidates {}".format(len(tracks))
    ans.append(info)
    print(info)

    # top five by vote count, best first
    for entry in sorted(tracks.values(), key=lambda x: x[0])[:-6:-1]:
        info = u'{} - {} - {}'.format(*entry)
        ans.append(info)
        print(info)

    return ans
def main():
    """
    Runs the converter file
    No input

    Reads markdown via converter.get_file(), converts it to html, asks
    the user for a destination name, and writes the html with a CSS link
    header prepended.
    """
    # set up html document to allow for css inclusion
    html_setup = ("<head>\n<link rel=\"stylesheet\" type=\"text/css\" "
                  "href=\"stylesheet.css\"/>\n</head>\n")

    # get the text and convert md to html
    test_text = converter.get_file()
    test_text = converter.convert(test_text)

    # choose where to save file to
    # (the original's `file_name = file_name` self-assignment was a no-op
    # and has been removed)
    file_name = raw_input("Enter a file name to save to: ")

    # write to chosen file
    with open(file_name, 'w') as html_file:
        html_file.write(html_setup)
        html_file.write(str(test_text))
def hero_sell(self):
    """Let the hero pick an item from his inventory and offer to buy it
    at 80% of list price."""
    # Show inventory, then ask which item to part with.
    self.hero.inventory_menu()
    raw = prompt("\"What would ye like to sell?\" ").lower()

    # converter.convert prints its own message and returns False for
    # unrecognized names, in which case the function simply ends.
    item = converter.convert(raw)

    royal_items = ("Treasure Map", "King's Loot")
    if item in self.hero.inventory and item.name not in royal_items:
        # Sellable: quote 80% of list price and ask for confirmation.
        sale_price = int(item.price * 0.8)
        print("\"I'll offer you " + str(sale_price) + " silver.\"")
        response = prompt("Is that agreeable to you?\" (y/n) ").upper()
        if response == "Y":
            # Deal accepted: hand over the item, pocket the silver.
            self.hero.drop(item)
            self.hero.coins += sale_price
            print("\"Nice doing business with you, sir!\"")
        elif response == "N":
            print("\"Tis the best I can do!\"")
        else:
            # Anything other than y/n ends the haggle.
            print("\a\"What?\"")
    elif item in self.hero.inventory:
        # Carried, but the King's property — refuse the sale.
        print("You can't sell that -- it belongs to the King!")
    elif item != False:
        # A real item the hero simply does not have.
        print("\"You don't have one!\a\"")
def main():
    # Correct binned lensing measurements with matched randoms, write the
    # corrected sample, and produce per-bin diagnostic plots plus html
    # index pages.  Usage: <prog> lensrun randrun (plus parser options).
    options,args = parser.parse_args(sys.argv[1:])
    if len(args) < 2:
        parser.print_help()
        sys.exit(1)

    lensrun=args[0]
    randrun=args[1]

    bintype=options.bintype
    minrad=float(options.minrad)
    subtract_rand = bool(options.subtract_rand)

    #if bintype is None or nbin is None:
    #    raise ValueError("currently demand some kind of binning")
    if bintype is None:
        raise ValueError("currently demand some kind of binning")

    if subtract_rand:
        print("Will subtract randoms")

    b = lensing.binning.instantiate_binner(bintype)
    nbin=b.get_nbin()

    # binned measurements and their random-point matches for this run
    binned_data = lensing.files.sample_read(type='binned',
                                            sample=lensrun,
                                            name=b.get_name())
    extra = 'randmatch-%s' % randrun
    allrand = lensing.files.sample_read(type='binned',
                                        sample=lensrun,
                                        name=b.get_name(),
                                        extra=extra)

    # apply the random-point correction and persist the corrected sample
    alldata = lensing.correct.correct(binned_data, allrand,
                                      subtract_rand=subtract_rand,
                                      minrad=minrad)
    lensing.files.sample_write(data=alldata,
                               type='corrected',
                               sample=lensrun,
                               name=b.get_name(),
                               extra=extra)

    # now some plots
    biggles.configure('screen','width', 1100)
    biggles.configure('screen','height', 1100)
    range4var = [0.1,100]
    for binnum in xrange(nbin):
        # one eps path per diagnostic plot for this bin
        eps_corr_extra='correction-%02d' % binnum
        eps_corr=lensing.files.sample_file(type='corrected-plots',
                                           sample=lensrun,
                                           name=b.get_name(),
                                           extra=eps_corr_extra, ext='eps')
        eps_rand_extra='randcomp-%02d' % binnum
        eps_rand=lensing.files.sample_file(type='corrected-plots',
                                           sample=lensrun,
                                           name=b.get_name(),
                                           extra=eps_rand_extra, ext='eps')
        # NOTE: eps_rand_extra is reused here for the "ortho" variant
        eps_rand_extra='randcomp-o-%02d' % binnum
        eps_orand=lensing.files.sample_file(type='corrected-plots',
                                            sample=lensrun,
                                            name=b.get_name(),
                                            extra=eps_rand_extra, ext='eps')
        eps_dsigcorr_extra='dsigcorr-%02d' % binnum
        eps_dsigcorr=lensing.files.sample_file(type='corrected-plots',
                                               sample=lensrun,
                                               name=b.get_name(),
                                               extra=eps_dsigcorr_extra, ext='eps')
        eps_all_extra='allcorr-%02d' % binnum
        eps_all=lensing.files.sample_file(type='corrected-plots',
                                          sample=lensrun,
                                          name=b.get_name(),
                                          extra=eps_all_extra, ext='eps')
        lensing.files.make_dir_from_path(eps_corr)

        data = alldata[binnum]
        rand = allrand[binnum]
        label=b.bin_label(binnum)

        # doplot returns a biggles table of sub-plots for this bin
        tab=doplot(binned_data[binnum], data, rand, label, show=options.show)

        # the corr(r)-1 plot
        tab[0,1][0,0].write_eps(eps_corr)
        converter.convert(eps_corr, dpi=90, verbose=True)

        # the corr(r)-1 plot
        tab[0,1][1,0].write_eps(eps_dsigcorr)
        converter.convert(eps_dsigcorr, dpi=90, verbose=True)

        # the rand comparison plot
        tab[0,0].write_eps(eps_rand)
        converter.convert(eps_rand, dpi=120, verbose=True)

        # the rand comparison plot with ortho
        tab[1,0].write_eps(eps_orand)
        converter.convert(eps_orand, dpi=120, verbose=True)

        # all
        tab.write_eps(eps_all)
        converter.convert(eps_all, dpi=150, verbose=True)

        if options.prompt:
            # 'q' aborts before the html index pages are generated
            key=raw_input("hit a key (q to quit): ")
            if key == 'q':
                return

    # build html index pages next to the last bin's plots; the '*.png'
    # pattern is derived by replacing the final bin number in the name
    d=os.path.dirname(eps_corr)
    os.chdir(d)

    outfile = os.path.join('correction.html')
    pattern = eps_corr.replace('%02d.eps' % (nbin-1,), '*.png')
    pattern=os.path.basename(pattern)
    print("making correction html file:",outfile)
    os.system('im2html -p '+pattern+' > '+outfile)

    outfile = os.path.join('randcomp.html')
    pattern = eps_rand.replace('%02d.eps' % (nbin-1,), '*.png')
    pattern=os.path.basename(pattern)
    print("making rand compare html file:",outfile)
    os.system('im2html -p '+pattern+' > '+outfile)

    outfile = os.path.join('dsigcorr.html')
    pattern = eps_dsigcorr.replace('%02d.eps' % (nbin-1,), '*.png')
    pattern=os.path.basename(pattern)
    print("making disg corr html file:",outfile)
    os.system('im2html -p '+pattern+' > '+outfile)

    outfile = os.path.join('allcorr.html')
    pattern = eps_all.replace('%02d.eps' % (nbin-1,), '*.png')
    pattern=os.path.basename(pattern)
    print("making all corr html file:",outfile)
    os.system('im2html -p '+pattern+' > '+outfile)
number2 = int(command[2]) last_value = team6.add(number1, number2) print(last_value) elif cmd == "sub": number1 = int(command[1]) number2 = int(command[2]) last_value = team6.sub(number1, number2) print(last_value) elif cmd == "opp": number = int(command[1]) last_value = team6.opp(number) print(last_value) elif cmd == "pow": number1 = int(command[1]) number2 = int(command[2]) last_value = team3.pow(number1, number2) print(last_value) elif cmd == "convert": converter.convert() elif cmd == "root": last_value=team1.root(int(command[1]),int(command[2])) print(last_value) elif cmd == "divide": number1 = float(command[1]) number2 = float(command[2]) last_value = team5.div(number1, number2) print(last_value) except: print colors.FAIL+"Command failed!"+colors.ENDC