def phxCompare(star, wlim=None, maxpow=None, mindw=None, ax=None):
    """Overplot a star's normalized panspec and PHOENIX spectrum.

    Both spectra are divided by the star's bolometric integral; the PHOENIX
    flux is first scaled by the panspec's final normfac. Optionally restrict
    to the wavelength range *wlim* and rebin with fancyBin using *maxpow*
    and/or *mindw*. Plots onto *ax* (current axes if None).
    """
    if ax is None:
        ax = plt.gca()

    phx_files = db.findfiles('ir', 'phx', star, fullpaths=True)
    phx = io.read(phx_files)[0]
    pan = io.read(db.panpath(star))[0]
    normfac = pan['normfac'][-1]

    if wlim is not None:
        phx = utils.keepranges(phx, wlim)
        pan = utils.keepranges(pan, wlim)

    if maxpow or mindw:
        pan = utils.fancyBin(pan, maxpow=maxpow, mindw=mindw)
        phx = utils.fancyBin(phx, maxpow=maxpow, mindw=mindw)

    Fbol = utils.bolo_integral(star)
    pan['normflux'] = pan['flux'] / Fbol
    pan['normerr'] = pan['error'] / Fbol
    phx['normflux'] = phx['flux'] * normfac / Fbol

    pan_line = specstep(pan, key='normflux', label='Panspec', ax=ax)
    specstep(pan, key='normerr', label='Panspec Error', ax=ax,
             color=pan_line.get_color(), ls=':')
    specstep(phx, key='normflux', label='Phoenix', ax=ax, color='r', alpha=0.5)

    ax.set_xlim(wlim)
    ax.legend(loc='best')
def compare_v20_v10(stars=None, plot=False):
    """Write v20/v10 SED comparison spectra for each star; optionally plot.

    The archived v10 panspec is spliced with the v10 HLSP file over
    100-1169 AA, both spectra are restricted to 0-10000 AA, and the ratio
    spectrum is saved to the scratchwork comparisons directory.
    """
    if stars is None:
        stars = mlstars

    oldfile_template = '/Users/rolo7566/Datasets/MUSCLES/products/archive/panspecs v10/p_msl_pan_-----_{}_panspec_native_resolution.fits'
    hlspfile_template = '/Users/rolo7566/Google Drive/Grad School/PhD Work/muscles/share/hlsp/archive/hlsp_muscles_multi_multi_{}_broadband_v10_var-res-sed.fits'
    compare_template = '/Users/rolo7566/Google Drive/Grad School/PhD Work/muscles/scratchwork/v10 - v20 comparisons/{} v20 vs v10 ratio.fits'

    for star in stars:
        new_pan = io.readpan(star)
        old_pan, = io.read(oldfile_template.format(star))
        hlsp, = io.read(hlspfile_template.format(star))
        old_pan = red.splice(old_pan, utils.keepranges(hlsp, 100., 1169.))

        new_pan = utils.keepranges(new_pan, 0, 10000.)
        old_pan = utils.keepranges(old_pan, 0, 10000.)

        cf = compare_template.format(star)
        cs = utils.compare_specs(new_pan, old_pan, savetxt=cf)
        cs.write(cf, overwrite=True)

        if plot:
            plt.figure()
            ss(cs, key='ratio')
            plt.title(star)
            plt.xlabel('Wavelength $\AA$')
            plt.ylabel('Ratio of v20 to v10 SED')
def lya_splices(stars='all'):
    """Save Lya splice diagnostic plots (panspec vs COS vs Lya model).

    For each star, overlays the evenly-binned panspec, the COS G130M coadd,
    and the Lya model near 1210-1222 AA, saving a linear- and a log-scale
    PDF into the 'lya splices' scratch directory.
    """
    if stars == 'all':
        stars = rc.stars[:11]
        stars.remove('gj551')

    dw = 0.05
    outdir = os.path.join(rc.scratchpath, 'lya splices')
    for star in stars:
        pan = io.readpan(star)
        pan = utils.evenbin(utils.keepranges(pan, 1100, 1300), dw)

        cos_file = db.findfiles('u', star, 'coadd', 'cos_g130m')
        cos, = io.read(cos_file)
        cos = utils.evenbin(cos, dw)

        lya_file = db.findfiles('u', star, 'mod', 'lya')
        lya, = io.read(lya_file)

        plt.figure()
        for s in (pan, cos, lya):
            ss(s, err=False)
        plt.xlim(1210, 1222)
        # Scale the linear plot off the flux near the Lya wings.
        up, _ = utils.flux_integral(cos, 1217, 1220)
        plt.ylim(0, up * 4)
        plt.legend(['pan', 'cos', 'lya'], loc='best')
        plt.savefig(os.path.join(outdir, '{} linear.pdf'.format(star)))

        mx = cos['flux'].max()
        plt.ylim(mx / 1e7, mx)
        plt.yscale('log')
        plt.savefig(os.path.join(outdir, '{} log.pdf'.format(star)))
def phx_compare_single(star):
    """Fractional panspec-vs-PHOENIX flux differences in three bands.

    Returns three (fractional difference, fractional error) pairs — FUV,
    NUV, and visible (up to 5700 AA) — where the difference is
    (panspec - phoenix) / panspec.
    """
    pan = io.read(db.panpath(star))[0]
    phx_files = db.findfiles('ir', 'phx', star, fullpaths=True)
    phx = io.read(phx_files)[0]
    phx['flux'] *= pan['normfac'][-1]

    bands = [rc.fuv, rc.nuv, [rc.vis[0], 5700.]]
    pan_ints = [utils.flux_integral(pan, *band) for band in bands]
    phx_ints = [utils.flux_integral(phx, *band) for band in bands]
    (pff, pfe), (pnf, pne), (pvf, pve) = pan_ints
    (xff, _), (xnf, _), (xvf, _) = phx_ints

    return (((pff - xff) / pff, pfe / pff),
            ((pnf - xnf) / pnf, pne / pnf),
            ((pvf - xvf) / pvf, pve / pvf))
def vetcoadd(star, config):
    """Plot the components of a coadded spectrum to check that the coadd agrees."""
    coadds = io.read(db.coaddfile(star, config))
    assert len(coadds) == 1
    coadd = coadds[0]

    # Overlay every source spectrum, then the coadd on top in bold black.
    for source in io.read(coadd.meta['SOURCESPECS']):
        specstep(source)
    specstep(coadd, lw=2.0, c='k', alpha=0.5)
    plt.title(path.basename(coadd.meta['FILENAME']))
def read_body(io, expected_size):
    """
    Read a (malformed) HTTP body.
    Returns:
        A (body: bytes, is_malformed: bool) tuple.
    """
    start = io.tell()
    try:
        body = b"".join(http1.read_body(io, expected_size, None))
        if io.read():
            # Leftover bytes after the expected body: treat as malformed.
            raise HttpException()
    except HttpException:
        # Fall back to returning the raw remainder from the body start.
        io.seek(start)
        return io.read(), True
    else:
        return body, False
def compareEUV(star):
    """Cross-check EUV flux integrals for a star.

    Prints four integrals: a trapezoid and a direct (bin-width-weighted) sum
    from Allison's text file and its spectbl version, plus the direct sum
    over the EUV portion of the panspec.
    """
    euvfile = db.findfiles('u', 'euv', star, fullpaths=True)[0]
    w, f, _ = np.loadtxt(euvfile).T
    I0 = np.trapz(f, w)
    print('Trapz from Allison\'s file: %g' % I0)

    spec = io.read(euvfile)[0]
    I1 = np.sum((spec['w1'] - spec['w0']) * spec['flux'])
    print('Direct integration from spectbl version of Allison\'s file: %g' % I1)

    I2 = np.trapz(spec['flux'], (spec['w0'] + spec['w1']) / 2.0)
    print('Trapz from spectbl version of Allison\'s file: %g' % I2)

    p = io.read(db.panpath(star))[0]
    keep = p['instrument'] == rc.getinsti('mod_euv_young')
    p = p[keep]
    # BUG FIX: I3 previously re-integrated `spec` (making it a duplicate of
    # I1) instead of the EUV rows of the panspec, `p`.
    I3 = np.sum((p['w1'] - p['w0']) * p['flux'])
    print('Direct integration from panspec EUV portion: %g' % I3)
def comparespecs(stars, **kwargs):
    """
    Parameters
    ----------
    binfunc : function or 'all', optional
        Function for regriding spectbls (such as evenbin or powerbin from
        reduce). Default is powerbin from 1000 to 5000 AA with R=500.0
        Set to 'all' to use the entire spectrum.
    axkw
    """
    default_bin = lambda s: utils.powerbin(s, R=5000.0, lo=1100.0, hi=5000.0)
    # pop() so the remaining kwargs pass straight through to normspec.
    binfunc = kwargs.pop('binfunc', default_bin)

    handles = []
    for star in stars:
        # read in panspectrum
        spec = io.read(db.panpath(star))[0]
        # interpolate it onto the desired bins
        if binfunc != 'all':
            spec = binfunc(spec)
        # plot
        handles.append(normspec(spec, **kwargs))

    plt.legend(handles, stars)
def vis_data(request):
    # Django view: show and parse crystal-structure data (VASP POSCAR text).
    # GET renders a default Cr/Te/O structure; POST parses user-submitted text
    # and stores it in the module-level `custom_data` cache.
    global custom_data
    data = {
        'crystal_data': """Cr Te O
1.0
7.016000 0.000000 0.000000
0.000000 7.545000 0.000000
-1.637391 0.000000 9.589209
Cr O Te
4 22 8
direct
0.319200 0.501900 0.384200
0.680800 0.001900 0.115800
0.680800 0.498100 0.615800
0.319200 0.998100 0.884200
0.205000 0.644000 0.212100
0.795000 0.144000 0.287900
0.795000 0.356000 0.787900
0.205000 0.856000 0.712100
0.500000 0.000000 0.500000
0.500000 0.500000 0.000000
0.127000 0.314000 0.342900
0.873000 0.814000 0.157100
0.873000 0.686000 0.657100
0.127000 0.186000 0.842900
0.468000 0.375000 0.262300
0.532000 0.875000 0.237700
0.532000 0.625000 0.737700
0.468000 0.125000 0.762300
0.563000 0.641000 0.451700
0.437000 0.141000 0.048300
0.437000 0.359000 0.548300
0.563000 0.859000 0.951700
0.158000 0.649000 0.486600
0.842000 0.149000 0.013400
0.842000 0.351000 0.513400
0.158000 0.851000 0.986600
0.139700 0.859900 0.176200
0.860300 0.359900 0.323800
0.860300 0.140100 0.823800
0.139700 0.640100 0.676200
0.672300 0.861800 0.415800
0.327700 0.361800 0.084200
0.327700 0.138200 0.584200
0.672300 0.638200 0.915800"""
    }
    p = request.POST
    if request.method == 'POST':
        custom_data = p.get('crystal_data', '')
        data['crystal_data'] = custom_data
    # NOTE(review): `io` here must be a project module providing read() for a
    # file-like object — the stdlib io module has no read(); confirm import.
    # NOTE(review): the buffer is never rewound (no f.seek(0)) after writing
    # before io.read(f) — verify io.read handles that.
    f = io.StringIO()
    f.write(custom_data)
    s = io.read(f)
    #s.symmetrize()
    data['structure'] = s
    data.update(csrf(request))
    custom_data = data['crystal_data']
    return render_to_response('analysis/view_data.html', get_globals(data),
                              RequestContext(request))
def test_faster_export(self):
    # Bulk-create 3k users, export all rows, and check both the XLS row
    # count and the response-time budget.
    start = time.time()
    user_count = User.objects.count()
    User.objects.bulk_create([User(username='******' % i) for i in range(3000)])
    self.assertEqual(User.objects.count(), 3000 + user_count)

    start = time.time()
    perms = ['auth.change_user', 'auth.adminactions_export_user']
    with user_grant_permission(self.user, perms):
        res = self.app.get('/', user='******')
        res = res.click('Users')
        form = res.forms['changelist-form']
        form['action'] = self.action_name
        form['select_across'] = 1
        self._select_rows(form)
        res = form.submit()
        res.form['header'] = 1
        res = res.form.submit('apply')
        res_time = (time.time() - start)

        buf = six.BytesIO(res.body)
        buf.seek(0)
        workbook = xlrd.open_workbook(file_contents=buf.read())
        sheet = workbook.sheet_by_index(0)
        self.assertEqual(sheet.nrows, 3000 + user_count + 1)
        self.assertLessEqual(res_time, 6.5,
                             "Response should return under 6.5 "
                             "seconds, was %.2f" % res_time)
def blackbody_fit(star):
    """Return a function that is a blackbody fit to the phoenix spectrum for
    the star. The fit is to the unnormalized phoenix spectrum, so the fit
    function values must be multiplied by the appropriate normalization
    factor to match the normalized spectrum."""
    phx = io.read(db.findfiles('ir', 'phx', star))[0]

    # Thin the spectrum: recursively keep only relative maxima until fewer
    # than N points remain.
    N = 10000
    keep = np.arange(len(phx))
    while len(keep) > N:
        peaks, = argrelmax(phx['flux'][keep])
        keep = keep[peaks]

    Teff = rc.starprops['Teff_muscles'][star]
    efac = const.h * const.c / const.k_B / (Teff * u.K)
    efac = efac.to(u.angstrom).value

    w = (phx['w0'] + phx['w1']) / 2.0
    w = w[keep]
    planck_shape = 1.0 / w**5 / (np.exp(efac / w) - 1)
    flux = phx['flux'][keep]

    # Linear least-squares normalization: norm = sum(f*y) / sum(f^2).
    norm = np.sum(planck_shape * flux) / np.sum(planck_shape**2)

    return lambda wav: norm / wav**5 / (np.exp(efac / wav) - 1)
def parse(cls, raw):
    """Parse a raw HTTP response dump into an instance of *cls*.

    Dumps prefixed with "HTTP response [...]" or "HTTP redirect [...]" have
    their first line rewritten into a proper status line before being fed to
    http.client for parsing. If the body is truncated (IncompleteRead), the
    text after the header separator is used as the content.
    """
    altered = raw
    comment = ""
    if altered.startswith("HTTP response [") or altered.startswith(
            "HTTP redirect ["):
        # BUG FIX: the local was previously named `io`, shadowing the `io`
        # module and raising UnboundLocalError on io.StringIO(raw).
        buf = io.StringIO(raw)
        first_line = buf.readline()
        parts = cls.extract_status.search(first_line)
        status_line = "HTTP/1.0 %s %s" % (parts.group(1), parts.group(2))
        remain = buf.read()
        altered = status_line + "\r\n" + remain
        comment = first_line
    response = http.client.HTTPResponse(FakeSocket(altered))
    response.begin()
    try:
        content = response.read(-1)
    except http.client.IncompleteRead:
        # Truncated body: salvage everything after the blank header line.
        content = raw[raw.find("\r\n\r\n") + 4:].rstrip("\r\n")
    return cls(
        httpVersion="HTTP/1.1" if response.version == 11 else "HTTP/1.0",
        status=response.status,
        statusText=response.reason,
        headers=response.msg,
        content=content,
        comment=comment,
        raw=raw)
def initialize_phase(config, timepoint):
    # Measure phase and GFP signals for wells in every image tile of the
    # given timepoint. Returns a DataFrame with per-well centroids
    # (ImageX/ImageY), two mean-intensity columns, and tile indices
    # (IndexX/IndexY). NOTE: Python 2 print statements below.
    image_list, image_idx = kchip_io.list_images(
        config['image']['base_path'], config['image']['names'][timepoint])
    phase_df = []
    for xy in image_idx:
        print 'Now analyzing: ', xy[0], ',', xy[1]
        t = kchip_io.read(config, x=xy[0], y=xy[1], t=timepoint, number=5)
        gfp, dyes, phase = split_channels(config, t)
        # Well mask = intersection of the dye-based and phase-based masks.
        mask = dye_mask(config, dyes) * phase_mask(config, phase)
        # High-pass then range-filter the phase channel to get a texture signal.
        phase_signal = range_filter(phase_high_pass(config, phase))
        phase_props = regionprops(mask, phase_signal)
        gfp_props = regionprops(mask, gfp)
        one = np.asarray([p['centroid'] for p in phase_props])
        two = np.asarray([p['mean_intensity'] for p in phase_props])[:, np.newaxis]
        three = np.asarray([p['mean_intensity'] for p in gfp_props])[:, np.newaxis]
        data = np.hstack((one, two, three))
        # NOTE(review): the stacked column order is (centroid, phase mean,
        # gfp mean) but the labels are ['ImageX','ImageY','pGFP','Phase'] —
        # the 'pGFP'/'Phase' labels look swapped; confirm against consumers.
        phase_df_ = pd.DataFrame(data=data,
                                 columns=['ImageX', 'ImageY', 'pGFP', 'Phase'])
        phase_df_['IndexX'] = xy[0]
        phase_df_['IndexY'] = xy[1]
        phase_df.append(phase_df_)
    phase_df = pd.concat(phase_df)
    phase_df.reset_index(inplace=True, drop=True)
    return phase_df
def find_coaddfile(specfiles):
    """
    Look for a file that is the coaddition of the provided spectlbs.

    Returns the filename if it exists and it contains data from all of the
    provided spectbls, otherwise returns None.

    Raises
    ------
    NotImplementedError
        If the spectbls come from more than one instrument configuration.
    """
    # check for multiple configurations
    # BUG FIX: the original compared insts[:-1] to itself (always equal, so
    # mixed configurations were never detected) and "returned"
    # NotImplemented(...) — which is not callable and raised TypeError.
    # list(map(...)) also keeps this correct on Python 3, where map is lazy.
    insts = np.array(list(map(parse_instrument, specfiles)))
    if np.any(insts[:-1] != insts[1:]):
        raise NotImplementedError("...can't deal with different data sources.")

    coaddfile = coaddpath(specfiles[0])
    if os.path.isfile(coaddfile):
        coadd, = io.read(coaddfile)
        # check that the coadd contains the same data as the spectbls
        # return None if any is missing
        csourcespecs = coadd.meta['SOURCESPECS']
        for sf in specfiles:
            if parse_name(sf) not in csourcespecs:
                return None
        return coaddfile
    # if the file doesn't exist, return None
    else:
        return None
def test_issue_93(self):
    # default date(time) format in XLS export doesn't import well on excel
    perms = ['demo.change_demomodel', 'demo.adminactions_export_demomodel']
    with user_grant_permission(self.user, perms):
        res = self.app.get('/', user='******')
        res = res.click('Demo models')
        form = res.forms['changelist-form']
        form['action'] = self.action_name
        self._select_rows(form)
        res = form.submit()
        res.form['header'] = 1
        res.form['columns'] = ['date']
        res = res.form.submit('apply')

        buf = six.BytesIO(res.body)
        buf.seek(0)
        workbook = xlrd.open_workbook(file_contents=buf.read(),
                                      formatting_info=True)
        sheet = workbook.sheet_by_index(0)
        cell = sheet.cell(1, 1)
        xf = workbook.xf_list[cell.xf_index]
        fmt = workbook.format_map[xf.format_key]  # gets a Format object
        self.assertEqual(cell.value, 41303.0)
        self.assertEqual(cell.ctype, 3)
        self.assertEqual(fmt.format_str, u'd/m/Y')
def test_use_display_ko(self):
    # Export selected columns and verify both header labels and cell values
    # in the resulting XLS.
    perms = ['demo.change_demomodel', 'demo.adminactions_export_demomodel']
    with user_grant_permission(self.user, perms):
        res = self.app.get('/', user='******')
        res = res.click('Demo models')
        form = res.forms['changelist-form']
        form['action'] = self.action_name
        self._select_rows(form)
        res = form.submit()
        res.form['header'] = 1
        res.form['columns'] = ['char', 'text', 'bigint', 'choices']
        res = res.form.submit('apply')

        buf = six.BytesIO(res.body)
        buf.seek(0)
        workbook = xlrd.open_workbook(file_contents=buf.read())
        sheet = workbook.sheet_by_index(0)
        self.assertEquals(sheet.cell_value(0, 1), u'Chäř')
        self.assertEquals(sheet.cell_value(0, 2), u'bigint')
        self.assertEquals(sheet.cell_value(0, 3), u'text')
        self.assertEquals(sheet.cell_value(0, 4), u'choices')
        self.assertEquals(sheet.cell_value(1, 1), u'Pizzä ïs Gööd')
        self.assertEquals(sheet.cell_value(1, 2), 333333333.0)
        self.assertEquals(sheet.cell_value(1, 3), u'lorem ipsum')
        self.assertEquals(sheet.cell_value(1, 4), 2.0)
def phxFit(star='gj832', ax=None):
    """Plot the HST panspec against the PHOENIX model for a star, both
    normalized by the bolometric integral, on a log flux axis."""
    if ax is None:
        ax = plt.gca()
    ax.autoscale(axis='x', tight=True)
    ax.set_yscale('log')

    pan = io.readpan(star)
    bolo = utils.bolo_integral(star)
    phx = io.read(db.findfiles('ir', 'phx', star, fullpaths=True))[0]
    phx['flux'] *= pan['normfac'][-1]

    # Drop panspec rows belonging to the same instrument as the final row.
    pan = pan[pan['instrument'] != pan['instrument'][-1]]
    # Smallest positive normalized error, computed before any trimming.
    fmin = np.min(pan['error'][pan['error'] > 0]) / bolo

    rng = [phx['w0'][0], 6000.]
    phx = utils.keepranges(phx, rng, ends='loose')
    pan = utils.keepranges(pan, rng, ends='loose')
    phx = utils.fancyBin(phx, mindw=10.0)
    pan = utils.fancyBin(pan, mindw=10.0)

    # Blank out bins below half their own error.
    pan['flux'][pan['flux'] < pan['error'] / 2.0] = np.nan
    pan['normflux'] = pan['flux'] / bolo
    phx['normflux'] = phx['flux'] / bolo

    ymax = 10 ** np.ceil(np.log10(np.max(phx['normflux'])))
    ymin = 10 ** np.floor(np.log10(fmin) - 3)
    ax.set_ylim(ymin, ymax)

    pline = plot.specstep(pan, ax=ax, color='k', err=False, key='normflux')
    xline = plot.specstep(phx, ax=ax, color='gray', err=False, key='normflux')
    ax.legend((pline, xline), ('$HST$ Data', 'PHOENIX Model'),
              loc='lower right')
def test_success(self):
    # Export id/username/first_name columns and verify XLS headers and a
    # couple of cell values.
    perms = ['auth.change_user', 'auth.adminactions_export_user']
    with user_grant_permission(self.user, perms):
        res = self.app.get('/', user='******')
        res = res.click('Users')
        form = res.forms['changelist-form']
        form['action'] = self.action_name
        self._select_rows(form)
        res = form.submit()
        res.form['header'] = 1
        res.form['columns'] = ['id', 'username', 'first_name']
        res = res.form.submit('apply')

        buf = six.BytesIO(res.body)
        buf.seek(0)
        workbook = xlrd.open_workbook(file_contents=buf.read())
        sheet = workbook.sheet_by_index(0)
        self.assertEquals(sheet.cell_value(0, 0), u'#')
        self.assertEquals(sheet.cell_value(0, 1), u'ID')
        self.assertEquals(sheet.cell_value(0, 2), u'username')
        self.assertEquals(sheet.cell_value(0, 3), u'first name')
        self.assertEquals(sheet.cell_value(1, 1), 1.0)
        self.assertEquals(sheet.cell_value(1, 2), u'sax')
        self.assertEquals(sheet.cell_value(2, 2), u'user')
def test_use_display_ko(self):
    # Duplicate of the other test_use_display_ko: export selected columns
    # and check headers plus cell values in the XLS output.
    perms = ['demo.change_demomodel', 'demo.adminactions_export_demomodel']
    with user_grant_permission(self.user, perms):
        res = self.app.get('/', user='******')
        res = res.click('Demo models')
        form = res.forms['changelist-form']
        form['action'] = self.action_name
        self._select_rows(form)
        res = form.submit()
        res.form['header'] = 1
        res.form['columns'] = ['char', 'text', 'bigint', 'choices']
        res = res.form.submit('apply')

        buf = six.BytesIO(res.body)
        buf.seek(0)
        workbook = xlrd.open_workbook(file_contents=buf.read())
        sheet = workbook.sheet_by_index(0)
        self.assertEquals(sheet.cell_value(0, 1), u'Chäř')
        self.assertEquals(sheet.cell_value(0, 2), u'bigint')
        self.assertEquals(sheet.cell_value(0, 3), u'text')
        self.assertEquals(sheet.cell_value(0, 4), u'choices')
        self.assertEquals(sheet.cell_value(1, 1), u'Pizzä ïs Gööd')
        self.assertEquals(sheet.cell_value(1, 2), 333333333.0)
        self.assertEquals(sheet.cell_value(1, 3), u'lorem ipsum')
        self.assertEquals(sheet.cell_value(1, 4), 2.0)
def upload_chunks(io, path, options, upload=None, etags=None):
    # Stream the readable `io` to the remote `path` in parts. Each iteration
    # asks the server to begin/continue a multi-part upload, reads one part
    # of `upload.partsize` bytes, sends it, and records the returned ETag.
    # Returns an (upload, etags, bytes_written) tuple at EOF.
    #
    # NOTE(review): the parameter named `io` shadows any imported io module
    # inside this function; it is expected to be a file-like object.
    if not etags:
        etags = []
    bytes_written = 0
    while True:
        # Part 1 starts a fresh upload; subsequent parts continue upload.ref.
        headers = { "part": 1 } if not upload else { "ref": upload.ref, "part": upload.part_number + 1 }
        upload = file_action.begin_upload(path, headers, options)[0]
        buf = io.read(upload.partsize)
        if buf == b'' or buf == "":
            # Empty bytearray means EOF for BytesIO, Empty String means EOF for StringIO
            return (upload, etags, bytes_written)
        if buf is not None:  # None means no data but io still open
            bytes_written += len(buf)
            response = Api.api_client().send_remote_request(
                upload.http_method, upload.upload_uri,
                {"Content-Length": str(len(buf))}, buf)
            if "ETag" in response.headers:
                etags.append({
                    "etag": response.headers["ETag"].strip('"'),
                    "part": upload.part_number
                })
def test_success(self):
    # Duplicate of the other test_success: export id/username/first_name and
    # verify XLS headers and sample cell values.
    perms = ['auth.change_user', 'auth.adminactions_export_user']
    with user_grant_permission(self.user, perms):
        res = self.app.get('/', user='******')
        res = res.click('Users')
        form = res.forms['changelist-form']
        form['action'] = self.action_name
        self._select_rows(form)
        res = form.submit()
        res.form['header'] = 1
        res.form['columns'] = ['id', 'username', 'first_name']
        res = res.form.submit('apply')

        buf = six.BytesIO(res.body)
        buf.seek(0)
        workbook = xlrd.open_workbook(file_contents=buf.read())
        sheet = workbook.sheet_by_index(0)
        self.assertEquals(sheet.cell_value(0, 0), u'#')
        self.assertEquals(sheet.cell_value(0, 1), u'ID')
        self.assertEquals(sheet.cell_value(0, 2), u'username')
        self.assertEquals(sheet.cell_value(0, 3), u'first name')
        self.assertEquals(sheet.cell_value(1, 1), 1.0)
        self.assertEquals(sheet.cell_value(1, 2), u'sax')
        self.assertEquals(sheet.cell_value(2, 2), u'user')
def initialize_post_wells(config, timepoint):
    '''Identify wells in post-merge timepoint. Returns post_wells dataFrame.
    Inputs:
    - config, the config dictionary
    - timepoint, the name of the timepoint to analyze (e.g. t0, t1, t2 ...)
      (should correspond to name in config file)
    Outputs:
    - post_wells, dataFrame'''
    image_list, image_idx = kchip_io.list_images(
        config['image']['base_path'], config['image']['names'][timepoint])

    frames = []
    for xy in image_idx:
        print('Now analyzing: ' + str(xy[0]) + ',' + str(xy[1]))
        # Read the tile and locate its wells.
        post_img = kchip_io.read(config, x=xy[0], y=xy[1], t=timepoint)
        wells = drop.post_img_to_wells(config, post_img)
        wells.insert(0, 'IndexY', xy[1])
        wells.insert(0, 'IndexX', xy[0])
        frames.append(wells)

    post_wells = pd.concat(frames)
    post_wells.reset_index(inplace=True, drop=True)
    return post_wells
def test_issue_93(self):
    # default date(time) format in XLS export doesn't import well on excel
    # (duplicate of the other test_issue_93)
    perms = ['demo.change_demomodel', 'demo.adminactions_export_demomodel']
    with user_grant_permission(self.user, perms):
        res = self.app.get('/', user='******')
        res = res.click('Demo models')
        form = res.forms['changelist-form']
        form['action'] = self.action_name
        self._select_rows(form)
        res = form.submit()
        res.form['header'] = 1
        res.form['columns'] = ['date']
        res = res.form.submit('apply')

        buf = six.BytesIO(res.body)
        buf.seek(0)
        workbook = xlrd.open_workbook(file_contents=buf.read(),
                                      formatting_info=True)
        sheet = workbook.sheet_by_index(0)
        cell = sheet.cell(1, 1)
        xf = workbook.xf_list[cell.xf_index]
        fmt = workbook.format_map[xf.format_key]  # gets a Format object
        self.assertEqual(cell.value, 41303.0)
        self.assertEqual(cell.ctype, 3)
        self.assertEqual(fmt.format_str, u'd/m/Y')
def plotspec(version, ax):
    # Plot the binned broadband SED for one HLSP version onto ax.
    # (Relies on star, res, maxw, and binto from the enclosing scope.)
    specfile, = db.findfiles(rc.hlsppath, star, 'broadband', version, res)
    spec, = io.read(specfile)
    spec = utils.evenbin(utils.keepranges(spec, 0, maxw), binto)
    plt.axes(ax)
    piecespec(spec)
    plt.text(0.05, 0.9, version, transform=ax.transAxes)
def execute(operator, operand, acc):
    """Execute a single instruction, and return new desired accumulator result"""
    # Dispatches on the instruction's operator code; branch instructions
    # mutate program_counter, HLT sets halt_flag, and IO reads/writes via
    # the io module. Flags are recomputed from the final accumulator.
    global program_counter, z_flag, p_flag, memory, halt_flag
    if operator == instruction.HLT:  # 0xx
        if operand == 0:  # HLT 00 is actually HLT
            halt_flag = True
    elif operator == instruction.ADD:  # 1xx
        acc += memory[operand]
        acc = truncate(acc)
    elif operator == instruction.SUB:  # 2xx
        acc -= memory[operand]
        acc = truncate(acc)
    elif operator == instruction.STA:  # 3xx
        memory[operand] = acc
        ##trace("m[" + str(operand) + "]=" + str(acc))
    elif operator == instruction.LDA:  # 5xx
        acc = memory[operand]
        ##trace("a=m[" + str(operand) + "]")
    elif operator == instruction.BRA:  # 6xx  unconditional branch
        program_counter = operand
    elif operator == instruction.BRZ:  # 7xx  branch if zero flag set
        if z_flag:
            program_counter = operand
    elif operator == instruction.BRP:  # 8xx  branch if positive flag set
        if p_flag:
            program_counter = operand
    elif operator == instruction.IO:  # 9xx
        if operand == instruction.getOperand(instruction.INP):  # 901
            # Prompt only when stdin is an interactive terminal.
            if not STDIN_REDIRECTED:
                sys.stdout.write("in? ")
            value = io.read()
            #TODO: should we cope with negative numbers here and complement appropriately?
            #TODO: Should honour buswidth here depending on decimal/binary/hexadecimal io mode
            if value < 0 or value > 999:
                raise ValueError("Out of range value:" + str(value))
            acc = truncate(value)
        elif operand == instruction.getOperand(instruction.OUT):  # 902
            if not STDOUT_REDIRECTED:
                sys.stdout.write("out=")
            io.write(acc)
    else:  # unhandled operator
        raise ValueError("Unknown operator:" + str(operator))
    update_flags(acc)
    return acc
def plotnorm(spec_or_star, ax=None, clip=1e-10):
    """Plot the normalized flux of a spectrum, blanking values below *clip*.

    Parameters
    ----------
    spec_or_star : spectbl or str
        A spectrum table, or a star name whose R=10000 panspec is read.
    ax : matplotlib axes, optional
        Target axes; defaults to the current axes at call time.
    clip : float, optional
        Normalized-flux floor; values below it are plotted as NaN.
    """
    # BUG FIX: the default was `ax=plt.gca()`, which is evaluated once at
    # import time and pins every later call to whatever axes existed then.
    if ax is None:
        ax = plt.gca()
    if type(spec_or_star) is str:
        spec = io.read(db.Rpanpath(spec_or_star, 10000))[0]
    else:
        spec = spec_or_star
    spec = utils.add_normflux(spec)
    # BUG FIX: the `clip` argument was ignored in favor of a hard-coded 1e-10.
    bad = spec['normflux'] < clip
    spec['normflux'][bad] = np.nan
    plot.specstep(spec, ax=ax)
def scan(id):
    # Run Cloud Vision text detection on a stored image and return the first
    # detected text annotation (or None when nothing is detected).
    #
    # NOTE(review): `io.read(datastore_image)` is not a stdlib io function —
    # this likely should be `datastore_image.read()` or the raw bytes from
    # `read(id)`; confirm what `read` returns before relying on this.
    # NOTE(review): the loop returns on its first iteration, so only the
    # first annotation (usually the full detected block) is ever reported.
    vision = get_vision_client()
    datastore_image = read(id)
    content = io.read(datastore_image)
    image = types.Image(content=content)
    response = vision.text_detection(image=image)
    texts = response.text_annotations
    for text in texts:
        return ('\n"{}"'.format(text.description))
def vetpanspec(pan_or_star, constant_dw=None, redlim=8000.0):
    """Plot unnormalized components of the panspec with the panspec to see
    that all choices were good. Phoenix spectrum is excluded because it is
    too big."""
    # Accept either a panspec table or a star name.
    if type(pan_or_star) is str:
        star = pan_or_star
        panspec = io.read(db.panpath(star))[0]
    else:
        panspec = pan_or_star
        star = panspec.meta['STAR']
    files = db.panfiles(star)[0]
    panspec = utils.keepranges(panspec, 0.0, redlim)

    if constant_dw is None:
        plotfun = specstep
    else:
        # Rebin everything onto a constant-dw grid so components overlay.
        panspec = utils.evenbin(panspec, constant_dw)
        wbins = utils.wbins(panspec)

        def plotfun(spec, **kwargs):
            s = utils.rebin(spec, wbins)
            return specstep(s, **kwargs)

    for f in files:
        if 'phx' in f:
            continue  # skip the (huge) PHOENIX model
        specs = io.read(f)
        for spec in specs:
            p = plotfun(spec, err=True)[0]
            # Label each component at its wavelength midpoint / mean flux,
            # colored to match its plotted line.
            x = (spec['w0'][0] + spec['w0'][-1]) / 2.0
            y = np.mean(spec['flux'])
            inst = db.parse_instrument(spec.meta['NAME'])
            plt.text(x, y, inst, bbox={
                'facecolor': 'w',
                'alpha': 0.5,
                'color': p.get_color()
            }, ha='center', va='center')

    plotfun(panspec, color='k', alpha=0.5)
    # Scale the y axis to the 3000-8000 AA flux maximum.
    ymax = np.max(utils.keepranges(panspec, 3000.0, 8000.0)['flux'])
    plt.gca().set_ylim(-0.01 * ymax, 1.05 * ymax)
    plt.draw()
def initialize_droplets(config): '''Initialize the droplets DataFrame by finding droplets in all images. Inputs: - config, the config dictionary created from config Outputs: - droplets (DataFrame), with Image index and position, and RGB data ''' # Determine image list image_list, image_idx = kchip_io.list_images( config['image']['base_path'], config['image']['names']['premerge']) # initialize list of droplets dataframes from each image droplets = [] # initialize average f_img, which later averages all the fourier transforms # to detect the rotation f_img = 0 for xy in image_idx: print 'Creating droplets from: ' + str(xy[0]) + ',' + str(xy[1]) # Read in image img = kchip_io.read(config, x=xy[0], y=xy[1], t='premerge') # Locate droplets and store in temporary dataframe, then append to list of dataframes droplets_ = drop.find_droplets(img.sum(axis=2)) droplets_.insert(0, 'IndexY', xy[1]) droplets_.insert(0, 'IndexX', xy[0]) # Pull out the RGB value for each droplet and store in dataframe # Compute local average dyes = drop.local_average(img[:, :, config['image']['dyes']]) to_add = pd.DataFrame(dyes[droplets_['ImageY'], droplets_['ImageX']]) # Fix a bug where if there is only 1 droplet detected it is wrong orientaiton if to_add.shape[1] != 3: to_add = to_add.T droplets_[['R', 'G', 'B']] = to_add # append dataframe droplets.append(droplets_) # fft the image, clip, and store update average f_img += matchmask.clip_image(matchmask.fft( img.mean(axis=2))).astype('float') / len(image_idx) # Concatenate all the droplets_ dataframes droplets = pd.concat(droplets).reset_index(drop=True) # Correct the rotation droplets, rotation_theta = apply_rotation(droplets, f_img) return droplets, rotation_theta
def register(config, img_tuple, t='premerge', t2='t0'):
    '''Register a translation between a given pre- and post-merge image tuple
    Inputs:
    - config, the config file dictionary
    - img_tuple: a tuple of image pathnames for pre- and post-merge images
    - t: the pre-merge timepoint name
    - t2: the post-merge timepoint name
    Returns:
    - shift, a 1 x 2 numpy array to be "translated/subtracted" from
      coordinates in the second image in tuple
    '''
    x, y = img_tuple[0], img_tuple[1]
    pre_img = kchip_io.read(config, x=x, y=y, t=t)
    post_img = kchip_io.read(config, x=x, y=y, t=t2)

    # Exclude the channels flagged as 'bugs' before registering.
    keep = np.delete(np.arange(pre_img.shape[2]), config['image']['bugs'])
    shift, error, diffphase = register_translation(pre_img[:, :, keep],
                                                   post_img[:, :, keep])
    return shift[:-1]
def cyclespec(files):
    """Page through spectrum files interactively: plot every spectbl in each
    file and block on plt.show() between files."""
    plt.ioff()
    for fname in files:
        for spec in io.read(fname):
            specstep(spec)
        plt.title(path.basename(fname))
        plt.xlabel('Wavelength [$\AA$]')
        plt.ylabel('Flux [erg/s/cm$^2$/$\AA$]')
        plt.show()
    plt.ion()
def phx_norm_compare():
    """Print the percent change in PHOENIX normalization factor between the
    archived v10 panspecs and the current ones, one line per star."""
    oldfile_template = '/Users/rolo7566/Datasets/MUSCLES/products/archive/panspecs v10/p_msl_pan_-----_{}_panspec_native_resolution.fits'
    for star in mlstars:
        old_pan, = io.read(oldfile_template.format(star))
        old_fac = old_pan['normfac'][-1]
        new_fac = io.readpan(star)['normfac'][-1]
        pct_change = abs(new_fac / old_fac - 1) * 100.
        name = rc.starprops['name txt'][star]
        print('{:8s} | {:6.1f}'.format(name, pct_change))
def stackpans(range=[1310, 1350], keeprange=[1100, 2000], xlim=None,
              norm=True, offfac=1.0):
    """Stack the panspecs of all observed stars, trimmed to *keeprange*,
    normalizing/offsetting per stackspecs over the *range* interval."""
    if xlim is None:
        xlim = keeprange
    pans = [utils.keepranges(io.read(db.panpath(star))[0], *keeprange)
            for star in rc.observed]
    stackspecs(pans, range, norm=norm, offfac=offfac, xlim=xlim)
def test_utf8_writer(self):
    # A utf-8 writer should emit utf-8 for unicode input and pass byte
    # input (euc-jp or utf-8) through unchanged.
    writer = converters.getwriter('utf-8')
    stream = writer(self.io)

    stream.write(self.u_japanese + '\n')
    stream.seek(0)
    tools.eq_(stream.read().strip(), self.utf8_japanese)

    stream.seek(0)
    stream.truncate(0)
    stream.write(self.euc_jp_japanese + b'\n')
    stream.seek(0)
    tools.eq_(stream.read().strip(), self.euc_jp_japanese)

    stream.seek(0)
    stream.truncate(0)
    stream.write(self.utf8_japanese + b'\n')
    stream.seek(0)
    tools.eq_(stream.read().strip(), self.utf8_japanese)
def load(filename, memory, startaddr=0):
    """Load from a file into memory.

    Reads instructions sequentially via io.read(file=f) and stores them at
    consecutive addresses starting at *startaddr*, stopping when io.read
    returns None.
    """
    # Use a context manager so the file is closed even if io.read raises
    # (the original leaked the handle on error), and test for the sentinel
    # with `is not None` per PEP 8 rather than `!= None`.
    with open(filename, "rt") as f:
        addr = startaddr
        while True:
            instr = io.read(file=f)
            if instr is None:
                break
            memory[addr] = instr
            addr += 1