def get_twats(item, proxies=None, count=0, http=None, checkfn=None, nitters={},
              host=None, search=False, user_agent="curl/7.60.0",
              blacklist={}, whitelist={}):
    query = ('/search?f=tweets&q=%s' % item.strip('#')) if search else ('/%s' % item)
    page = 1
    elapsed_time = time.time()
    hdr, res, http, host, nitters = nitter_get(query, http, host, nitters, proxies, user_agent)
    # make sure all tweets fetched in a single invocation get the same timestamp,
    # otherwise ordering might become messed up once we sort them
    timestamp = int(time.time())
    twats = []
    break_loop = False
    while True:
        twats, cursor = extract_twats(res, item, twats, timestamp, checkfn,
                                      nitters, blacklist, whitelist)
        sys.stdout.write('\r[%s] %s: scraping... p:%d ' % (
            misc.get_timestamp("%Y-%m-%d %H:%M:%S", elapsed_time), item, page))
        sys.stdout.flush()
        if count == 0 or (len(twats) == 0 and not len(cursor)) or break_loop or \
                (count != -1 and len(twats) >= count):
            break
        if checkfn and not checkfn(item, twats):
            break
        # fetch additional tweets that are not in the initial set of 20:
        if len(twats):
            last_id = get_effective_twat_id(twats[-1])
        # we scraped everything
        if not len(cursor):
            break
        query = ('/search?f=tweets&q=%s%s' % (item.strip('#'), cursor[0])) \
            if search else ('/%s%s' % (item, cursor[0]))
        hdr, res, http, host, nitters = nitter_get(query, http, host, nitters, proxies, user_agent)
        page += 1
    return twats, nitters, host, http, page
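# Hypothetical usage sketch (not in the original module): scrape one hashtag
# end-to-end. count=-1 means "no limit" given the exit conditions above, and
# search=True routes the request through /search?f=tweets. The hashtag and
# variable names here are illustrative assumptions.
twats_, nitters_, host_, http_, pages_ = get_twats('#example', count=-1, search=True)
print('fetched %d twat(s) across %d page(s)' % (len(twats_), pages_))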
def get_data(self):
    """Compute the sun's current position as seen from self.observer.

    :return: altitude (rad), azimuth (rad) and the unix time of the sample
    """
    sun = ephem.Sun()
    timestamp, unix_time = get_timestamp()
    time_date = ephem.Date(timestamp)
    self.observer.date = time_date
    sun.compute(self.observer)
    alt, az = sun.alt, sun.az
    return alt, az, unix_time
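# Minimal, self-contained sketch of the same computation outside the class.
# The coordinates are illustrative assumptions; pyephem expects lat/lon as
# strings of degrees and returns alt/az as radian-valued Angle objects.
import time
import ephem

observer = ephem.Observer()
observer.lat, observer.lon = '52.52', '13.40'  # assumed location
observer.date = ephem.now()
sun = ephem.Sun()
sun.compute(observer)
print('alt=%s az=%s unix=%d' % (sun.alt, sun.az, int(time.time())))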
x_axis0 = x_axis0[::-1]
invert_x0 = True

all_mean = []
for proj in projx0:
    screen = get_screen_from_proj(proj, x_axis0, invert_x0)
    xx, yy = screen._xx, screen._yy
    gf = gaussfit.GaussFit(xx, yy)
    all_mean.append(gf.mean)

mean0 = np.mean(all_mean)
print('%s: Mean0: %.3e mm' % (main_label, mean0 * 1e3))

if fit_emittance and main_label != 'Short':
    timestamp0 = misc.get_timestamp(os.path.basename(p_dict['filename0']))
    tracker0 = tracking.Tracker(magnet_file, timestamp0, struct_lengths,
                                n_particles, n_emittances, screen_bins,
                                screen_cutoff, smoothen, profile_cutoff,
                                len_profile, quad_wake=quad_wake)
    bp_test = tracking.get_gaussian_profile(40e-15, tt_halfrange, len_profile,
                                            charge, tracker0.energy_eV)
def close(self, close_phase=None):
    if close_phase is not None:
        self.handlers[close_phase].close()
        return
    for phase in self.log_phases:
        self.handlers[phase].close()

def __del__(self):
    for phase in self.log_phases:
        self.handlers[phase].close()


if __name__ == '__main__':
    import random
    from misc import get_timestamp

    time = get_timestamp()
    logger = Logger(time, train_logfile='tests/train.log', val_logfile='tests/val.log')
    for epoch in range(3):
        logger.epoch_begin()
        for i in range(2):
            metrics = {'seg_loss': random.gauss(0., 1.), 'kl_div': random.gauss(0., 1.)}
            logger.log_iteration(metrics, i, 'train')
        logger.log_epoch(metrics, 'train')
        for j in range(3):
            metrics = {'seg_loss': random.gauss(0., 1.), 'kl_div': random.gauss(0., 1.)}
            logger.log_iteration(metrics, j, 'val')
        logger.log_epoch(metrics, 'val')
def main():
    fig_paper = ms.figure('Comparison plots')
    subplot = ms.subplot_factory(2, 2)
    sp_ctr_paper = 1

    #images0 = dict0['projx'][-1]
    #x_axis = dict0['x_axis']*1e-6
    #if np.diff(x_axis)[0] < 0:
    #    x_axis = x_axis[::-1]
    #    invert_x = True
    #else:
    #    invert_x = False

    process_dict = {
        'Long': {
            'filename': file38,
            'main_dict': dict38,
            'proj0': dict0['projx'][-1],
            'x_axis0': dict0['x_axis'] * 1e-6,
            'n_offset': None,
            'filename0': file0,
            'blmeas': blmeas38,
            'flipx': False,
        },
        'Medium': {
            'filename': file25,
            'main_dict': dict25,
            'proj0': dict25['projx'][7],
            'x_axis0': dict25['x_axis'] * 1e-6,
            'n_offset': 0,
            'filename0': file25,
            'blmeas': blmeas25,
            'flipx': False,
        },
    }

    for main_label, p_dict in process_dict.items():
        #if main_label != 'Medium':
        #    continue

        projx0 = p_dict['proj0']
        x_axis0 = p_dict['x_axis0']
        if np.diff(x_axis0)[0] < 0:
            x_axis0 = x_axis0[::-1]
            invert_x0 = True
        else:
            invert_x0 = False

        all_mean = []
        for proj in projx0:
            screen = get_screen_from_proj(proj, x_axis0, invert_x0)
            xx, yy = screen._xx, screen._yy
            gf = gaussfit.GaussFit(xx, yy)
            all_mean.append(gf.mean)
        mean0 = np.mean(all_mean)

        timestamp0 = misc.get_timestamp(os.path.basename(p_dict['filename0']))
        tracker0 = tracking.Tracker(
            archiver_dir + 'archiver_api_data/2020-10-03.h5', timestamp0,
            struct_lengths, n_particles, n_emittances, screen_bins,
            screen_cutoff, smoothen, profile_cutoff, len_profile)

        bp_test = tracking.get_gaussian_profile(40e-15, tt_halfrange, len_profile,
                                                charge, tracker0.energy_eV)
        screen_sim = tracker0.matrix_forward(bp_test, [10e-3, 10e-3], [0, 0])['screen']
        all_emittances = []
        for proj in projx0:
            screen_meas = get_screen_from_proj(proj, x_axis0, invert_x0)
            emittance_fit = misc.fit_nat_beamsize(screen_meas, screen_sim, n_emittances[0])
            all_emittances.append(emittance_fit)
        new_emittance = np.mean(all_emittances)
        print(main_label, 'Emittance [nm]', new_emittance * 1e9)
        n_emittances[0] = new_emittance

        dict_ = p_dict['main_dict']
        file_ = p_dict['filename']
        x_axis = dict_['x_axis'] * 1e-6
        y_axis = dict_['y_axis'] * 1e-6
        n_offset = p_dict['n_offset']

        if np.diff(x_axis)[0] < 0:
            x_axis = x_axis[::-1]
            invert_x = True
        else:
            invert_x = False

        if np.diff(y_axis)[0] < 0:
            y_axis = y_axis[::-1]
            invert_y = True
        else:
            invert_y = False

        timestamp = misc.get_timestamp(os.path.basename(file_))
        tracker = tracking.Tracker(
            archiver_dir + 'archiver_api_data/2020-10-03.h5', timestamp,
            struct_lengths, n_particles, n_emittances, screen_bins,
            screen_cutoff, smoothen, profile_cutoff, len_profile)

        blmeas = p_dict['blmeas']
        flip_measured = p_dict['flipx']
        profile_meas = tracking.profile_from_blmeas(blmeas, tt_halfrange, charge,
                                                    tracker.energy_eV, subtract_min=True)
        profile_meas.reshape(len_profile)
        profile_meas2 = tracking.profile_from_blmeas(blmeas, tt_halfrange, charge,
                                                     tracker.energy_eV, subtract_min=True,
                                                     zero_crossing=2)
        profile_meas2.reshape(len_profile)
        if flip_measured:
            profile_meas.flipx()
        else:
            profile_meas2.flipx()

        profile_meas.cutoff(1e-2)
        profile_meas2.cutoff(1e-2)

        beam_offsets = [0., -(dict_['value'] * 1e-3 - mean_struct2)]
        distance_um = (gaps[n_streaker] / 2. - beam_offsets[n_streaker]) * 1e6
        if n_offset is not None:
            distance_um = distance_um[n_offset]
            beam_offsets = [beam_offsets[0], beam_offsets[1][n_offset]]

        tdc_screen1 = tracker.matrix_forward(profile_meas, gaps, beam_offsets)['screen']
        tdc_screen2 = tracker.matrix_forward(profile_meas2, gaps, beam_offsets)['screen']

        plt.figure(fig_paper.number)
        sp_profile_comp = subplot(sp_ctr_paper, title=main_label, xlabel='t [fs]',
                                  ylabel='Intensity (arb. units)')
        sp_ctr_paper += 1
        profile_meas.plot_standard(sp_profile_comp, norm=True, color='black',
                                   label='TDC', center='Right')

        ny, nx = 2, 4
        subplot = ms.subplot_factory(ny, nx)
        sp_ctr = np.inf

        all_profiles, all_screens = [], []

        if n_offset is None:
            projections = dict_['projx']
        else:
            projections = dict_['projx'][n_offset]

        for n_image in range(len(projections)):
            screen = get_screen_from_proj(projections[n_image], x_axis, invert_x)
            screen.crop()
            screen._xx = screen._xx - mean0

            gauss_dict = tracker.find_best_gauss(
                sig_t_range, tt_halfrange, screen, gaps, beam_offsets,
                n_streaker, charge, self_consistent=self_consistent)
            best_screen = gauss_dict['reconstructed_screen']
            best_screen.cutoff(1e-3)
            best_screen.crop()
            best_profile = gauss_dict['reconstructed_profile']
            if n_image == 0:
                screen00 = screen
                bp00 = best_profile
                best_screen00 = best_screen
            best_gauss = gauss_dict['best_gauss']

            if sp_ctr > (ny * nx):
                ms.figure('All reconstructions Distance %i %s' % (distance_um, main_label))
                sp_ctr = 1

            if n_image % 2 == 0:
                sp_profile = subplot(sp_ctr, title='Reconstructions')
                sp_ctr += 1
                sp_screen = subplot(sp_ctr, title='Screens')
                sp_ctr += 1
                profile_meas.plot_standard(sp_profile, color='black', label='Measured',
                                           norm=True, center='Right')
                tdc_screen1.plot_standard(sp_screen, color='black')

            color = screen.plot_standard(sp_screen, label=n_image)[0].get_color()
            best_screen.plot_standard(sp_screen, color=color, ls='--')
            best_profile.plot_standard(sp_profile, label=n_image, norm=True, center='Right')
            sp_profile.legend()
            sp_screen.legend()

            all_profiles.append(best_profile)

        # Averaging the reconstructed profiles
        all_profiles_time, all_profiles_current = [], []
        for profile in all_profiles:
            profile.shift('Right')
            #all_profiles_time.append(profile.time - profile.time[np.argmax(profile.current)])
            all_profiles_time.append(profile.time)
        new_time = np.linspace(min(x.min() for x in all_profiles_time),
                               max(x.max() for x in all_profiles_time), len_profile)
        for tt, profile in zip(all_profiles_time, all_profiles):
            new_current = np.interp(new_time, tt, profile.current, left=0, right=0)
            new_current *= charge / new_current.sum()
            all_profiles_current.append(new_current)
        all_profiles_current = np.array(all_profiles_current)
        mean_profile = np.mean(all_profiles_current, axis=0)
        std_profile = np.std(all_profiles_current, axis=0)
        average_profile = tracking.BeamProfile(new_time, mean_profile,
                                               tracker.energy_eV, charge)
        average_profile.plot_standard(sp_profile_comp, label='Reconstructed',
                                      norm=True, center='Right')

        ms.figure('Test averaging %s' % main_label)
        sp = plt.subplot(1, 1, 1)
        for yy in all_profiles_current:
            sp.plot(new_time, yy / np.trapz(yy, new_time), lw=0.5)

        to_plot = [
            ('Average', new_time, mean_profile, 'black', 3),
            ('+1 STD', new_time, mean_profile + std_profile, 'black', 1),
            ('-1 STD', new_time, mean_profile - std_profile, 'black', 1),
        ]

        integral = np.trapz(mean_profile, new_time)
        for pm, ctr, color in [(profile_meas, 1, 'red'), (profile_meas2, 2, 'green')]:
            #factor = integral/np.trapz(pm.current, pm.time)
            #t_meas = pm.time-pm.time[np.argmax(pm.current)]
            i_meas = np.interp(new_time, pm.time, pm.current)
            bp = tracking.BeamProfile(new_time, i_meas, energy_eV=tracker.energy_eV,
                                      charge=charge)
            bp.shift('Right')
            to_plot.append(('TDC %i' % ctr, bp.time, bp.current, color, 3))

        for label, tt, profile, color, lw in to_plot:
            gf = gaussfit.GaussFit(tt, profile)
            width_fs = gf.sigma * 1e15
            if label is None:
                label = ''
            label = (label + ' %i fs' % width_fs).strip()
            factor = np.trapz(profile, tt)
            sp.plot(tt, profile / factor, color=color, lw=lw, label=label)

        sp.legend(title=r'Gaussian fit $\sigma$')

    plt.show()
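# Standalone sketch of the profile-averaging step used above (synthetic data,
# names prefixed demo_ to avoid clashing with the script's globals): resample
# every profile onto a common time axis, renormalize each one to the bunch
# charge, then take mean and standard deviation sample-wise. The Gaussian
# test profiles and the 200 pC charge are assumptions for illustration.
import numpy as np

demo_charge = 200e-12
demo_rng = np.random.default_rng(0)
demo_profiles = []
for _ in range(5):
    t = np.linspace(-50e-15, 50e-15, 300) + demo_rng.normal(0., 2e-15)
    demo_profiles.append((t, np.exp(-t**2 / (2 * (10e-15)**2))))

demo_time = np.linspace(min(t.min() for t, _ in demo_profiles),
                        max(t.max() for t, _ in demo_profiles), 300)
demo_currents = []
for t, i in demo_profiles:
    c = np.interp(demo_time, t, i, left=0, right=0)
    demo_currents.append(c * demo_charge / c.sum())  # same total charge per profile
demo_currents = np.array(demo_currents)
demo_mean = demo_currents.mean(axis=0)
demo_std = demo_currents.std(axis=0)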
def scrape(item, http, host, search, user_agent):
    global nitters
    global mastodon_rshttp

    item = item.lower()
    if item in new_accounts:
        count = args.count
        checkfn = None
        new_accounts.remove(item)
    else:
        checkfn = fetch_more_tweets_callback
        count = args.count if item[0] == '#' else -1

    if item.count('@') < 2:
        platform = 'twitter'
        twats, nitters, host, http, page = get_twats(
            item, proxies=args.proxy, count=count, http=http, checkfn=checkfn,
            nitters=nitters, host=host, search=search, user_agent=user_agent,
            blacklist=blacklist, whitelist=whitelist)
    else:
        platform = 'mastodon'
        twats, http = get_toots(item, proxies=args.proxy, count=count, http=http,
                                checkfn=checkfn, user_agent=user_agent,
                                blacklist=args.blacklist, whitelist=args.whitelist)
        mastodon_rshttp[host] = http
        page = 1  # get_toots() reports no page count

    insert_pos = dict()
    new = False
    user = None if item[0] == '#' else item
    insert_pos_total = 0
    elapsed_time = time.time()
    for t in twats:
        if item[0] == '#':
            user = t['user'].lower()
        if not user in insert_pos:
            insert_pos[user] = 0
        if not in_twatlist(user, t):
            new = True
            if args.unshorten:
                t = unshorten_urls(t, proxies=args.proxy, shorteners=shorteners)
            add_twatlist(user, t, insert_pos[user])
            insert_pos[user] += 1
            insert_pos_total += 1
            if 'quote_tweet' in t:
                if '@' in t['quote_tweet']['user']:
                    _, foo, bar = t['quote_tweet']['user'].split('@')
                    http = None if not bar in mastodon_rshttp else mastodon_rshttp[bar]
                if not os.path.isdir(paths.get_user(t['quote_tweet']['user'])):
                    retry_makedirs(paths.get_user(t['quote_tweet']['user']))
                fetch_profile_picture(t['quote_tweet']['user'], args.proxy, twhttp=http,
                                      nitters=nitters, platform=platform)
            if 'user' in t:
                if '@' in t['user']:
                    _, foo, bar = t['user'].split('@')
                    http = None if not bar in mastodon_rshttp else mastodon_rshttp[bar]
                if not os.path.isdir(paths.get_user(t['user'])):
                    retry_makedirs(paths.get_user(t['user']))
                fetch_profile_picture(t['user'], args.proxy, twhttp=http,
                                      nitters=nitters, platform=platform)
            if args.mirror:
                mirror_twat(t, args=args)
            sys.stdout.write('\r[%s] %s: extracting from %d page(s): +%d twat(s)' % (
                misc.get_timestamp("%Y-%m-%d %H:%M:%S", elapsed_time),
                item, page, insert_pos_total))
            sys.stdout.flush()

    if new:
        if item[0] == '#':
            for user in insert_pos.keys():
                write_user_tweets(user)
        else:
            write_user_tweets(item)
    elapsed_time = (time.time() - elapsed_time)
    sys.stdout.write('done (%s)\n' % misc.get_timestamp("%H:%M:%S", elapsed_time))
    sys.stdout.flush()
    return http, host
sp_img = subplot(sp_ctr, grid=False, title='Offset %.2f mm' % (offset * 1e3),
                 scix=True, sciy=True)
sp_ctr += 1
sp_img.imshow(image, aspect='auto', extent=(x_axis[0], x_axis[-1], y_axis[-1], y_axis[0]))

sp_proj = subplot(sp_ctr, title='Projections', scix=True, sciy=True)
sp_ctr += 1
sp_proj.plot(x_axis, image.sum(axis=0), label='X')
sp_proj.plot(y_axis, image.sum(axis=1), label='Y')
sp_proj.legend()

# Try for largest offset
timestamp = get_timestamp(file_)
tracker = tracking.Tracker(archiver_dir + 'archiver_api_data/2020-10-03.h5', timestamp,
                           struct_lengths, n_particles, n_emittances, screen_bins,
                           screen_cutoff, smoothen, profile_cutoff, len_profile,
                           optics0=optics0)
r12 = tracker.calcR12()[n_streaker]

bp_test = tracking.get_gaussian_profile(40e-15, tt_halfrange, len_profile, charge, energy_eV)
screen_sim = tracker.matrix_forward(bp_test, [10e-3, 10e-3], [0, 0])['screen']
emittances_fit = []
for n_image, image in enumerate(dict_['Image'][-1]):
    screen_meas = tracking.ScreenDistribution(x_axis, image.T.sum(axis=0))
    emittance_fit = misc.fit_nat_beamsize(screen_meas, screen_sim, n_emittances[0])
    emittances_fit.append(emittance_fit)

emittances_fit = np.array(emittances_fit)
mean_emittance = emittances_fit.mean()