def set_timezone(self):
    """Set the system timezone to Asia/Shanghai, then echo the new date.

    Failures of timedatectl are logged rather than raised.
    """
    try:
        Logger.info("开始设置中国时区")
        subprocess.run(
            "timedatectl set-timezone Asia/Shanghai",
            shell=True,
            check=True,
        )
        # Print the current date so the operator can confirm the change.
        subprocess.run("date -R", shell=True)
    except subprocess.CalledProcessError:
        Logger.error("时区设置错误")
def get_python_version(self):
    """Report the installed Python 3 and Python 2 versions, if present.

    Each interpreter is probed independently; a missing one is logged
    as an error and does not stop the other probe.
    """
    probes = (
        ("Python3版本", "python3 -V", "没有找到Python3"),
        ("Python2版本", "python2 -V", "没有找到Python2"),
    )
    for label, command, missing_msg in probes:
        try:
            Logger.info(label)
            subprocess.run(command, shell=True, check=True)
        except subprocess.CalledProcessError:
            Logger.error(missing_msg)
def install_java(self):
    """Download (if necessary) and unpack JDK 8, then register it system-wide.

    Bug fixes over the original:
    - both ``tar`` extraction calls were missing ``shell=True``, so the whole
      command string was treated as a single executable name and could never
      run (FileNotFoundError);
    - the bashrc edit opened a file literally named ``'sudo nano ~/.bashrc'``
      in the working directory; it now appends to the user's real ``~/.bashrc``.
    """
    def _extract_jdk():
        # Create the JVM directory and unpack the archive into it.
        subprocess.run('sudo mkdir /usr/lib/jvm', shell=True, check=True)
        subprocess.run(
            'sudo tar -zxvf jdk-8u231-linux-x64.tar.gz -C /usr/lib/jvm',
            shell=True)

    try:
        # Skip the download when the archive already exists locally.
        if os.path.exists('jdk-8u231-linux-x64.tar.gz'):
            _extract_jdk()
        else:
            Logger.info('开始下载jdk8')
            subprocess.run(
                'sudo wget https://github.com/morestart/auto_deploy/releases/download/1.0/jdk-8u231'
                '-linux-x64.tar.gz',
                shell=True, check=True)
            _extract_jdk()
        # Append JAVA_HOME / JRE_HOME / CLASSPATH / PATH exports to the
        # user's shell profile so new shells pick up the JDK.
        with open(os.path.expanduser('~/.bashrc'), 'a+') as f:
            f.write('\n')
            f.write('export JAVA_HOME=/usr/lib/jvm/jdk1.8.0_231')
            f.write('\n')
            f.write('export JRE_HOME=${JAVA_HOME}/jre')
            f.write('\n')
            f.write('export CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib')
            f.write('\n')
            f.write('export PATH=${JAVA_HOME}/bin:$PATH')
        # NOTE(review): 'source' runs in a child shell and cannot change this
        # process's environment; kept for parity with the original behaviour.
        subprocess.run('source ~/.bashrc', shell=True)
        subprocess.run(
            'sudo update-alternatives --install /usr/bin/java java '
            '/usr/lib/jvm/jdk1.8.0_231/bin/java 300', shell=True)
        subprocess.run('java -version', shell=True)
    except subprocess.CalledProcessError:
        Logger.error('下载jdk8失败')
def change_apt_source(self):
    """Back up /etc/apt/sources.list and replace it with Tsinghua (TUNA) mirrors.

    The new list targets Ubuntu 18.04 ("bionic"); source (deb-src) and
    proposed entries are written commented-out.
    """
    Logger.info('备份系统源')
    # Keep a backup so the original source list can be restored by hand.
    subprocess.run('cp /etc/apt/sources.list /etc/apt/sources.list.bak',
                   shell=True)
    # Overwrite the live source list with the mirror entries below.
    with open('/etc/apt/sources.list', 'w+') as f:
        f.writelines("""
# 默认注释了源码镜像以提高 apt update 速度,如有需要可自行取消注释
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic main restricted universe multiverse
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic main restricted universe multiverse
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic-updates main restricted universe multiverse
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic-updates main restricted universe multiverse
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic-backports main restricted universe multiverse
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic-backports main restricted universe multiverse
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic-security main restricted universe multiverse
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic-security main restricted universe multiverse

# 预发布软件源,不建议启用
# deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic-proposed main restricted universe multiverse
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic-proposed main restricted universe multiverse
""")
def install_mosquitto(self):
    """Install the Mosquitto broker, create credentials, and disable anonymous access.

    Bug fix: the generated config pointed ``password_file`` at
    ``/etc/mosquitto/pwfile`` while ``mosquitto_passwd`` wrote the
    credentials to ``/etc/mosquitto/passwd`` — the broker would have read a
    nonexistent password file. Both now use ``/etc/mosquitto/passwd``.
    """
    Logger.info("准备安装Mosquitto Broker")
    try:
        subprocess.run("sudo apt install mosquitto mosquitto-clients",
                       shell=True, check=True)
        mqtt_user_name = input("请输入MQTT用户名:")
        # Create the password file the broker authenticates against.
        subprocess.run("sudo mosquitto_passwd -c /etc/mosquitto/passwd " +
                       mqtt_user_name,
                       shell=True, check=True)
        try:
            with open("/etc/mosquitto/conf.d/default.conf", "w+") as f:
                # password_file must match the file written above.
                f.write("allow_anonymous false\n"
                        "password_file /etc/mosquitto/passwd\n"
                        "listener 1883\n")
            Logger.info("写入MQTT配置成功!")
        except FileNotFoundError:
            Logger.error("未发现mqtt配置文件,请重新安装...")
    except subprocess.CalledProcessError:
        Logger.error("安装失败,请重新安装")
    finally:
        # NOTE(review): runs even when the install failed, mirroring the
        # original control flow.
        Logger.info("重启MQTT服务")
        subprocess.run("sudo systemctl restart mosquitto",
                       shell=True, check=True)
def update_source_list(self):
    """Refresh the apt package index, then optionally upgrade installed packages.

    The upgrade step only runs when the operator answers 'y'/'Y'.
    """
    Logger.info("准备更新软件包列表")
    try:
        subprocess.run("sudo apt update", shell=True, check=True)
        Logger.info("软件包列表更新完毕")
        Logger.info("是否更新软件(y or n, default: n)")
        answer = input("(不输入可直接回车使用默认值)>")
        if answer in ("y", "Y"):
            self.upgrade_software()
    except subprocess.CalledProcessError:
        Logger.error("更新失败")
def install_ssh(self):
    """Install openssh-server, start it, and register it for start-on-boot."""
    try:
        Logger.info("开始安装ssh-server")
        subprocess.run("sudo apt install openssh-server",
                       shell=True, check=True)
    except subprocess.CalledProcessError:
        Logger.error("安装失败,请重新安装")
        return
    try:
        Logger.info("启动ssh-server")
        subprocess.run("sudo /etc/init.d/ssh start", shell=True, check=True)
        Logger.info("写入自启配置")
        # Append the start command so sshd comes up on boot.
        with open("/etc/rc.local", 'a+') as f:
            f.write('\n/etc/init.d/ssh start')
        Logger.info("自启配置写入成功")
    except subprocess.CalledProcessError:
        Logger.error("启动失败")
def emqx_config_explain(self):
    """Log step-by-step hints for manually configuring an EMQ X broker.

    Purely informational: prints the config path, how to disable anonymous
    access, and how to enable username/password authentication.
    """
    Logger.info("配置路径:")
    Logger.info("/etc/emqx")
    Logger.info("取消匿名访问模式:")
    Logger.info(
        "使用sudo nano /etc/emqx/emqx.conf 编辑配置文件,将allow_anonymous设置为false, ctrl o保存"
    )
    Logger.info("鉴权设置:")
    Logger.info(
        "使用此命令编辑sudo nano /etc/emqx/etc/plugins/emqx_auth_username.conf, "
        "注释掉现有内容, 打开auth.user.1的用户名和密码")
    # The auth plugin must be enabled from the dashboard before it takes effect.
    Logger.warn("在启动鉴权时,请先在dashboard中启动鉴权插件")
class Request:
    """Thin logging wrapper around ``requests`` with TLS verification disabled.

    Every helper issues one HTTP call with ``verify=False`` and logs the
    URL, payload and response status code. Improvement over the original:
    the identical three-line logging block that was copy-pasted into all
    four methods is factored into the private ``_log`` helper.
    """

    def __init__(self):
        self.log = Logger()
        # Silence insecure-request warnings, since verify=False is used
        # on every call below.
        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
        warnings.simplefilter("ignore", ResourceWarning)
        # 禁用安全请求警告
        requests.packages.urllib3.disable_warnings(
            urllib3.exceptions.InsecureRequestWarning)

    def _log(self, case_name, url, payload, response):
        # Shared request/response logging used by every public method.
        self.log.info("【%s - 请求地址】:%s" % (case_name, url))
        self.log.info("【%s - 请求参数】: %s" % (case_name, payload))
        self.log.info("【%s - 响应码】: %d" % (case_name, response.status_code))

    def post_request_data(self, _url, _data, _headers, case_name=None):
        """POST a form-encoded body; returns the raw response."""
        response = requests.post(url=_url, data=_data, headers=_headers,
                                 verify=False)
        self._log(case_name, _url, _data, response)
        return response

    def post_request_json(self, _url, _json, _headers, case_name=None):
        """POST a JSON body; returns the raw response."""
        response = requests.post(url=_url, json=_json, headers=_headers,
                                 verify=False)
        self._log(case_name, _url, _json, response)
        return response

    def post_request_files(self, _url, _files, _headers, case_name=None):
        """POST a multipart file upload; returns the raw response."""
        response = requests.post(url=_url, files=_files, headers=_headers,
                                 verify=False)
        self._log(case_name, _url, _files, response)
        return response

    def get_request(self, _url, _headers, _data=None, case_name=None):
        """GET with optional query parameters; returns the raw response."""
        response = requests.get(url=_url, params=_data, headers=_headers,
                                verify=False)
        self._log(case_name, _url, _data, response)
        return response
class Cutout:
    """Fetch, cache and plot a FITS image cutout from one of several surveys.

    Dispatches on ``survey`` to local FITS files (RACS/VAST/VLASS), the
    PanSTARRS, Skymapper and DECam LS web APIs, or SkyView as a fallback.
    Downloaded cutouts are cached under ``cutout_cache``.
    """

    def __init__(self, survey, position, radius, **kwargs):
        # survey: survey key (or a local FITS path); position: SkyCoord;
        # radius: cutout half-size — presumably degrees, matching the
        # `self.radius * u.deg` usage below (TODO confirm).
        self.survey = survey
        self.position = position
        self.ra = self.position.ra.to_value(u.deg)
        self.dec = self.position.dec.to_value(u.deg)
        self.radius = radius
        self.basesurvey = kwargs.get('basesurvey', 'racsI')
        self.psf = kwargs.get('psf')
        self.cmap = kwargs.get('cmap', 'gray_r')
        self.color = 'k' if self.cmap == 'hot' else 'black'
        self.band = kwargs.get('band', 'g')
        level = 'DEBUG' if kwargs.get('verbose') else 'INFO'
        self.logger = Logger(__name__, kwargs.get('log'),
                             streamlevel=level).logger
        self.logger.propagate = False
        self.kwargs = kwargs
        try:
            self._get_cutout()
        except Exception as e:
            # Re-wrap any fetch failure so callers see a single exception type.
            msg = f"{survey} failed: {e}"
            raise FITSException(msg)
        finally:
            # Source overlays are only available for RACS/VAST surveys;
            # runs even when the fetch raised.
            if 'racs' not in self.survey and 'vast' not in self.survey:
                self.plot_sources = False
                self.plot_neighbours = False

    def __repr__(self):
        return f"Cutout({self.survey}, ra={self.ra:.2f}, dec={self.dec:.2f})"

    def _get_source(self):
        """Locate selavy components near the cutout position and set overlay state."""
        try:
            # Match the field identifier (e.g. '2130-04A') embedded in the
            # image filename against the selavy component catalogues.
            pattern = re.compile(r'\S*(\d{4}[+-]\d{2}[AB])\S*')
            selpath = SURVEYS.loc[self.survey]['selavy']
            sel = glob.glob(f'{selpath}/*components.txt')
            sel = [s for s in sel if pattern.sub(r'\1', self.filepath) in s]
            if len(sel) > 1:
                df = pd.concat([pd.read_fwf(s, skiprows=[1, ]) for s in sel])
            else:
                df = pd.read_fwf(sel[0], skiprows=[1, ])
            coords = SkyCoord(df.ra_deg_cont, df.dec_deg_cont, unit=u.deg)
            d2d = self.position.separation(coords)
            df['d2d'] = d2d
            # Keep components within half the cutout radius, nearest first.
            sources = df.iloc[np.where(d2d.deg < 0.5 * self.radius)[0]]
            sources = sources.sort_values('d2d', ascending=True)
            # A component within the positional uncertainty (arcsec -> deg)
            # is treated as the target source; the rest are neighbours.
            if any(sources.d2d < self.pos_err / 3600):
                self.source = sources.iloc[0]
                self.neighbours = sources.iloc[1:]
                self.plot_sources = True
            else:
                self.source = None
                self.neighbours = sources
                self.plot_sources = False
            self.plot_neighbours = self.kwargs.get('neighbours', True)
            self.logger.debug(f'Source: \n {self.source}')
            if len(self.neighbours) > 0:
                nn = self.neighbours.iloc[0]
                self.logger.debug(
                    f'Nearest neighbour coords: \n {nn.ra_deg_cont, nn.dec_deg_cont}'
                )
                self.logger.debug(
                    f'Nearest 5 Neighbours \n {self.neighbours.head()}')
        except IndexError:
            # No matching catalogue file (sel[0] lookup failed).
            self.plot_sources = False
            self.plot_neighbours = False
            self.logger.warning('No nearby sources found.')

    def _get_cutout(self):
        """Dispatch to the survey-specific fetcher, creating the cache dir first."""
        if not os.path.exists(cutout_cache + self.survey):
            msg = f"{cutout_cache}{self.survey} cutout directory does not exist, creating."
            self.logger.info(msg)
            os.makedirs(cutout_cache + self.survey)
        if os.path.isfile(self.survey):
            # `survey` may itself be a path to a local FITS image.
            self._get_local_cutout()
        elif 'racs' in self.survey or 'vast' in self.survey or 'vlass' in self.survey:
            self._get_local_cutout()
        elif self.survey == 'skymapper':
            self._get_skymapper_cutout()
        elif self.survey == 'panstarrs':
            self._get_panstarrs_cutout()
        elif self.survey == 'decam':
            self._get_decam_cutout()
        else:
            self._get_skyview_cutout()

    def _get_local_cutout(self):
        """Fetch cutout data via local FITS images (e.g. RACS / VLASS)."""
        fields = self._find_image()
        assert len(
            fields
        ) > 0, f"No fields located at {self.position.ra:.2f}, {self.position.dec:.2f}"
        # Use the field whose centre is closest to the requested position.
        closest = fields[fields.dist_field_centre ==
                         fields.dist_field_centre.min()].iloc[0]
        image_path = SURVEYS.loc[self.survey]['images']
        if self.survey == 'vlass':
            filepath = f'{closest.epoch}/{closest.tile}/{closest.image}/{closest.filename}'
            image_path = vlass_path
        elif 'racs' in self.survey:
            # Last character of the survey key encodes the polarisation (I/V).
            pol = self.survey[-1]
            if on_system == 'ada':
                filepath = f'RACS_test4_1.05_{closest.field}.fits'
            else:
                filepath = f'RACS_{closest.field}.EPOCH00.{pol}.fits'
        elif 'vast' in self.survey:
            # Survey keys look like 'vastp1I' / 'vastp3xV': extract epoch + pol.
            pattern = re.compile(r'vastp(\dx*)([IV])')
            epoch = pattern.sub(r'\1', self.survey)
            pol = pattern.sub(r'\2', self.survey)
            filepath = f'VAST_{closest.field}.EPOCH0{epoch}.{pol}.fits'
        else:
            filepath = f'*{closest.field}*0.restored.fits'
        try:
            self.filepath = glob.glob(image_path + filepath)[0]
        except IndexError:
            raise FITSException(
                f'Could not match {self.survey} image filepath: \n{image_path + filepath}'
            )
        with fits.open(self.filepath) as hdul:
            self.header, data = hdul[0].header, hdul[0].data
            wcs = WCS(self.header, naxis=2)
            self.mjd = Time(self.header['DATE']).mjd
            # Radio images often carry degenerate freq/Stokes axes; fall back
            # to the plain 2-D array when they are absent.
            try:
                cutout = Cutout2D(data[0, 0, :, :], self.position,
                                  self.radius * u.deg, wcs=wcs)
            except IndexError:
                cutout = Cutout2D(data, self.position,
                                  self.radius * u.deg, wcs=wcs)
            # Convert to mJy (assumes input in Jy — TODO confirm).
            self.data = cutout.data * 1000
            self.wcs = cutout.wcs
        if 'racs' in self.survey or 'vast' in self.survey:
            self.pos_err = SURVEYS.loc[self.basesurvey].pos_err
            self._get_source()
        else:
            # Probably using vlass, yet to include aegean catalogs
            self.plot_sources = False
            self.plot_neighbours = False

    def _get_panstarrs_cutout(self):
        """Fetch cutout data via PanSTARRS DR2 API."""
        path = cutout_cache + 'panstarrs/{}_{}arcmin_{}_{}.fits'.format(
            self.band,
            '{:.3f}',
            '{:.3f}',
            '{:.3f}',
        )
        imgpath = path.format(self.radius * 60, self.ra, self.dec)
        if not os.path.exists(imgpath):
            # 0.25 arcsec/pixel scale: radius[deg] * 3600 / 0.25 = radius * 14400;
            # written here as radius * 120 * 120.
            pixelrad = int(self.radius * 120 * 120)
            service = "https://ps1images.stsci.edu/cgi-bin/ps1filenames.py"
            url = (
                f"{service}?ra={self.ra}&dec={self.dec}&size={pixelrad}&format=fits"
                f"&filters=grizy")
            table = Table.read(url, format='ascii')
            msg = f"No PS1 image at {self.position.ra:.2f}, {self.position.dec:.2f}"
            assert len(table) > 0, msg
            urlbase = (
                f"https://ps1images.stsci.edu/cgi-bin/fitscut.cgi?"
                f"ra={self.ra}&dec={self.dec}&size={pixelrad}&format=fits&red="
            )
            # Order rows by filter priority y > z > i > r > g.
            flist = ["yzirg".find(x) for x in table['filter']]
            table = table[np.argsort(flist)]
            # Download every listed filter into the cache.
            for row in table:
                self.mjd = row['mjd']
                filt = row['filter']
                url = urlbase + row['filename']
                path = cutout_cache + 'panstarrs/{}_{}arcmin_{}_{}.fits'.format(
                    filt,
                    '{:.3f}',
                    '{:.3f}',
                    '{:.3f}',
                )
                path = path.format(self.radius * 60, self.ra, self.dec)
                img = requests.get(url, allow_redirects=True)
                if not os.path.exists(path):
                    with open(path, 'wb') as f:
                        f.write(img.content)
        with fits.open(imgpath) as hdul:
            self.header, self.data = hdul[0].header, hdul[0].data
            self.wcs = WCS(self.header, naxis=2)

    def _get_skymapper_cutout(self):
        """Fetch cutout data via Skymapper API."""
        path = cutout_cache + self.survey + '/dr2_jd{:.3f}_{:.3f}arcmin_{:.3f}_{:.3f}'
        linka = 'http://api.skymapper.nci.org.au/aus/siap/dr2/'
        linkb = 'query?POS={:.5f},{:.5f}&SIZE={:.3f}&BAND=all&RESPONSEFORMAT=CSV'
        linkc = '&VERB=3&INTERSECT=covers'
        sm_query = linka + linkb + linkc
        link = linka + 'get_image?IMAGE={}&SIZE={}&POS={},{}&FORMAT=fits'
        table = requests.get(sm_query.format(self.ra, self.dec, self.radius))
        df = pd.read_csv(io.StringIO(table.text))
        assert len(
            df
        ) > 0, f'No Skymapper image at {self.position.ra:.2f}, {self.position.dec:.2f}'
        # Only the z band is used.
        df = df[df.band == 'z']
        self.mjd = df.iloc[0]['mjd_obs']
        link = df.iloc[0].get_image
        img = requests.get(link)
        path = path.format(self.mjd, self.radius * 60, self.ra, self.dec)
        if not os.path.exists(path):
            with open(path, 'wb') as f:
                f.write(img.content)
        with fits.open(path) as hdul:
            self.header, self.data = hdul[0].header, hdul[0].data
            self.wcs = WCS(self.header, naxis=2)

    def _get_decam_cutout(self):
        """Fetch cutout data via DECam LS API."""
        # 0.262 arcsec/pixel; the service caps cutouts at 512 pixels.
        size = int(self.radius * 3600 / 0.262)
        if size > 512:
            size = 512
            maxradius = size * 0.262 / 3600
            self.logger.warning(
                f"Using maximum DECam LS cutout radius of {maxradius:.3f} deg")
        link = f"http://legacysurvey.org/viewer/fits-cutout?ra={self.ra}&dec={self.dec}"
        link += f"&size={size}&layer=dr8&pixscale=0.262&bands={self.band}"
        img = requests.get(link)
        path = cutout_cache + self.survey + '/dr8_jd{:.3f}_{:.3f}arcmin_{:.3f}_{:.3f}_{}band'
        path = path.format(self.mjd, self.radius * 60, self.ra, self.dec,
                           self.band)
        if not os.path.exists(path):
            with open(path, 'wb') as f:
                f.write(img.content)
        with fits.open(path) as hdul:
            self.header, self.data = hdul[0].header, hdul[0].data
            self.wcs = WCS(self.header, naxis=2)
        msg = f"No DECam LS image at {self.position.ra:.2f}, {self.position.dec:.2f}"
        assert self.data is not None, msg

    def _get_skyview_cutout(self):
        """Fetch cutout data via SkyView API."""
        sv = SkyView()
        path = cutout_cache + self.survey + '/{:.3f}arcmin_{:.3f}_{:.3f}.fits'
        path = path.format(self.radius * 60, self.ra, self.dec)
        progress = self.kwargs.get('progress', False)
        if not os.path.exists(path):
            skyview_key = SURVEYS.loc[self.survey].sv
            try:
                hdul = sv.get_images(position=self.position,
                                     survey=[skyview_key],
                                     radius=self.radius * u.deg,
                                     show_progress=progress)[0][0]
            except IndexError:
                raise FITSException('Skyview image list returned empty.')
            except ValueError:
                raise FITSException(
                    f'{self.survey} is not a valid SkyView survey.')
            except HTTPError:
                raise FITSException('No response from Skyview server.')
            with open(path, 'wb') as f:
                hdul.writeto(f)
        with fits.open(path) as hdul:
            self.header, self.data = hdul[0].header, hdul[0].data
            self.wcs = WCS(self.header, naxis=2)
        # Derive the observation epoch: header DATE first, then an explicit
        # 'epoch' kwarg (MJD if > 3000, else decimal year), else None.
        try:
            self.mjd = Time(self.header['DATE']).mjd
        except KeyError:
            try:
                self.epoch = self.kwargs.get('epoch')
                msg = "Could not detect epoch, PM correction disabled."
                assert self.epoch is not None, msg
                self.mjd = self.epoch if self.epoch > 3000 else Time(
                    self.epoch, format='decimalyear').mjd
            except AssertionError as e:
                # Only worth warning about when proper-motion correction
                # was requested.
                if self.kwargs.get('pm'):
                    self.logger.warning(e)
                self.mjd = None
        # Convert to mJy (assumes input in Jy — TODO confirm).
        self.data *= 1000

    def _find_image(self):
        """Return DataFrame of survey fields containing coord."""
        # Strip the polarisation suffix to get the field-metadata file name.
        survey = self.survey.replace('I', '').replace('V', '')
        try:
            image_df = pd.read_csv(aux_path + f'{survey}_fields.csv')
        except FileNotFoundError:
            raise FITSException(f"Missing field metadata csv for {survey}.")
        beam_centre = SkyCoord(ra=image_df['cr_ra_pix'],
                               dec=image_df['cr_dec_pix'],
                               unit=u.deg)
        image_df['dist_field_centre'] = beam_centre.separation(
            self.position).deg
        # VLASS tiles are smaller than ASKAP fields.
        pbeamsize = 1 * u.degree if self.survey == 'vlass' else 5 * u.degree
        return image_df[image_df.dist_field_centre < pbeamsize].reset_index(
            drop=True)

    def _obfuscate(self):
        """Remove all coordinates and identifying information."""
        lon = self.ax.coords[0]
        lat = self.ax.coords[1]
        lon.set_ticks_visible(False)
        lon.set_ticklabel_visible(False)
        lat.set_ticks_visible(False)
        lat.set_ticklabel_visible(False)
        lon.set_axislabel('')
        lat.set_axislabel('')

    def _plot_setup(self, fig, ax):
        """Create figure and determine normalisation parameters."""
        if ax:
            self.fig = fig
            self.ax = ax
        else:
            self.fig = plt.figure()
            self.ax = self.fig.add_subplot(111, projection=self.wcs)
        if self.kwargs.get('grid', True):
            self.ax.coords.grid(color='white', alpha=0.5)
        self.ax.set_xlabel('RA (J2000)')
        self.ax.set_ylabel('Dec (J2000)')
        if self.kwargs.get('title', True):
            self.ax.set_title(SURVEYS.loc[self.survey]['name'],
                              fontdict={
                                  'fontsize': 20,
                                  'fontweight': 10
                              })
        if self.kwargs.get('obfuscate', False):
            self._obfuscate()
        if self.kwargs.get('annotation'):
            color = 'white' if self.cmap == 'hot' else 'k'
            self.ax.text(0.05,
                         0.85,
                         self.kwargs.get('annotation'),
                         color=color,
                         weight='bold',
                         transform=self.ax.transAxes)

    def _add_cornermarker(self, ra, dec, span, offset):
        """Draw an L-shaped crosshair offset from (ra, dec) in world coords."""
        color = 'white' if self.cmap != 'gray_r' else 'r'
        # Correct the RA extent for convergence of meridians at this dec.
        cosdec = np.cos(np.radians(dec))
        raline = Line2D(xdata=[ra + offset / cosdec, ra + span / cosdec],
                        ydata=[dec, dec],
                        color=color,
                        linewidth=2,
                        path_effects=[
                            pe.Stroke(linewidth=3, foreground='k'),
                            pe.Normal()
                        ],
                        transform=self.ax.get_transform('world'))
        decline = Line2D(xdata=[ra, ra],
                         ydata=[dec + offset, dec + span],
                         color=color,
                         linewidth=2,
                         path_effects=[
                             pe.Stroke(linewidth=3, foreground='k'),
                             pe.Normal()
                         ],
                         transform=self.ax.get_transform('world'))
        self.ax.add_artist(raline)
        self.ax.add_artist(decline)

    def plot(self, fig=None, ax=None):
        """Plot survey data and position overlay."""
        # Optional sign flip (e.g. for negative Stokes V detections).
        self.sign = self.kwargs.get('sign', 1)
        self._plot_setup(fig, ax)
        self.data *= self.sign
        absmax = max(self.data.max(), self.data.min(), key=abs)
        self.logger.debug(f"Max flux in cutout: {absmax:.2f} mJy.")
        rms = np.sqrt(np.mean(np.square(self.data)))
        self.logger.debug(f"RMS flux in cutout: {rms:.2f} mJy.")
        # Refuse to plot an empty / all-NaN cutout.
        assert (sum((~np.isnan(self.data).flatten())) > 0
                and sum(self.data.flatten()) != 0), \
            f"No data in {self.survey}"
        if self.kwargs.get('maxnorm'):
            self.norm = ImageNormalize(self.data,
                                       interval=ZScaleInterval(),
                                       vmax=self.data.max(),
                                       clip=True)
        else:
            self.norm = ImageNormalize(self.data,
                                       interval=ZScaleInterval(contrast=0.2),
                                       clip=True)
        self.im = self.ax.imshow(self.data, cmap=self.cmap, norm=self.norm)
        if self.kwargs.get('bar', True):
            try:
                self.fig.colorbar(self.im,
                                  label=r'Flux Density (mJy beam$^{-1}$)',
                                  ax=self.ax)
            except UnboundLocalError:
                self.logger.error(
                    "Colorbar failed. Upgrade to recent version of astropy ")
        if self.psf:
            # Prefer PSF dimensions from the header (deg -> arcsec); fall back
            # to the user-supplied psf tuple with BPA assumed 0.
            try:
                self.bmaj = self.header['BMAJ'] * 3600
                self.bmin = self.header['BMIN'] * 3600
                self.bpa = self.header['BPA']
            except KeyError:
                self.logger.warning('Header did not contain PSF information.')
                try:
                    self.bmaj = self.psf[0]
                    self.bmin = self.psf[1]
                    self.bpa = 0
                    self.logger.warning(
                        'Using supplied BMAJ/BMin. Assuming BPA=0')
                except ValueError:
                    self.logger.error('No PSF information supplied.')
            rhs = self.wcs.wcs_pix2world(self.data.shape[0], 0, 1)
            lhs = self.wcs.wcs_pix2world(0, 0, 1)
            # Offset PSF marker by the major axis in pixel coordinates
            try:
                cdelt = self.header['CDELT1']
            except KeyError:
                cdelt = self.header['CD1_1']
            beamavg = (self.bmaj + self.bmin) / 2
            beamsize_pix = beamavg / abs(cdelt) / 3600
            ax_len_pix = abs(lhs[0] - rhs[0]) / abs(cdelt) / 3600
            beam = self.wcs.wcs_pix2world(beamsize_pix, beamsize_pix, 1)
            self.beamx = beam[0]
            self.beamy = beam[1]
            self.beam = Ellipse((self.beamx, self.beamy),
                                self.bmin / 3600,
                                self.bmaj / 3600,
                                -self.bpa,
                                facecolor='white',
                                edgecolor='k',
                                transform=self.ax.get_transform('world'),
                                zorder=10)
            self.ax.add_patch(self.beam)
            # Optionally plot square around the PSF
            # Set size to greater of 110% PSF size or 10% ax length
            if self.kwargs.get('beamsquare', False):
                boxsize = max(beamsize_pix * 1.15, ax_len_pix * .1)
                offset = beamsize_pix - boxsize / 2
                self.square = Rectangle(
                    (offset, offset),
                    boxsize,
                    boxsize,
                    facecolor='white',
                    edgecolor='k',
                    # transform=self.ax.get_transform('world'),
                    zorder=5)
                self.ax.add_patch(self.square)
        if self.plot_sources:
            # Mark the matched catalogue source (corner marker or ellipse).
            if self.kwargs.get('corner'):
                self._add_cornermarker(
                    self.source.ra_deg_cont, self.source.dec_deg_cont,
                    self.kwargs.get('corner_span', 20 / 3600),
                    self.kwargs.get('corner_offset', 10 / 3600))
            else:
                self.sourcepos = Ellipse(
                    (self.source.ra_deg_cont, self.source.dec_deg_cont),
                    self.source.min_axis / 3600,
                    self.source.maj_axis / 3600,
                    -self.source.pos_ang,
                    facecolor='none',
                    edgecolor='r',
                    ls=':',
                    lw=2,
                    transform=self.ax.get_transform('world'))
                self.ax.add_patch(self.sourcepos)
        else:
            # No catalogue match: mark the requested position instead.
            if self.kwargs.get('corner'):
                self._add_cornermarker(
                    self.ra, self.dec,
                    self.kwargs.get('corner_span', 20 / 3600),
                    self.kwargs.get('corner_offset', 10 / 3600))
            else:
                # Fallback 15-arcsec circle when no beam is known.
                self.bmin = 15
                self.bmaj = 15
                self.bpa = 0
                overlay = SphericalCircle(
                    (self.ra * u.deg, self.dec * u.deg),
                    self.bmaj * u.arcsec,
                    edgecolor='r',
                    linewidth=2,
                    facecolor='none',
                    transform=self.ax.get_transform('world'))
                self.ax.add_artist(overlay)
        if self.plot_neighbours:
            # Cyan dotted ellipses for nearby catalogue components.
            for idx, neighbour in self.neighbours.iterrows():
                n = Ellipse((neighbour.ra_deg_cont, neighbour.dec_deg_cont),
                            neighbour.min_axis / 3600,
                            neighbour.maj_axis / 3600,
                            -neighbour.pos_ang,
                            facecolor='none',
                            edgecolor='c',
                            ls=':',
                            lw=2,
                            transform=self.ax.get_transform('world'))
                self.ax.add_patch(n)

    def save(self, path, fmt='png'):
        """Save figure with tight bounding box."""
        self.fig.savefig(path, format=fmt, bbox_inches='tight')

    def savefits(self, path):
        """Export FITS cutout to path"""
        header = self.wcs.to_header()
        hdu = fits.PrimaryHDU(data=self.data, header=header)
        hdu.writeto(path)
import sys try: Logger.add_logger( logging.handlers.RotatingFileHandler("./logs/bot.log", maxBytes=5 * 1024 * 1024 * 1024, backupCount=1000, encoding="iso-8859-1")) Logger.add_logger(logging.StreamHandler(sys.stdout)) Registry.logger = Logger("registry") logger = Logger("bootstrap") config_file = "./conf/config.json" logger.info("Starting Budabot...") if not os.path.exists(config_file): print('Configuration file not found.') answer = input('Would you like to create one now?[Y/n]') if answer == 'y' or len(answer) < 1: # Create a new config create_new_cfg() else: print( 'You can manually create a config by editing the template /conf/config.template.json and rename it to config.json' ) exit(0) logger.debug("Reading config file '%s'" % config_file) with open(config_file) as cfg: config = json.load(cfg)
metrics=[metric_type]) for i in range(params.epochs): if "unbalance" in params.__dict__ and params.unbalance: model.fit_generator(reader.getPointWiseSamples4Keras(onehot = params.onehot,unbalance=params.unbalance), epochs = 1,steps_per_epoch=int(len(reader.datas["train"])/reader.batch_size), verbose = True,callbacks=[logger.getCSVLogger()]) else: model.fit_generator(reader.getPointWiseSamples4Keras(onehot = params.onehot), epochs = 1,steps_per_epoch=len(reader.datas["train"]["question"].unique())/reader.batch_size, verbose = True,callbacks=[logger.getCSVLogger()]) y_pred = model.predict(x = test_data,batch_size=params.batch_size) score =batch_softmax_with_first_item(y_pred)[:,1] if params.onehot else y_pred metric = reader.evaluate(score, mode = "test") evaluations.append(metric) logger.info(metric) elif params.match_type == 'pairwise': test_data.append(test_data[0]) test_data = [to_array(i,reader.max_sequence_length) for i in test_data] model.compile(loss = identity_loss, optimizer = units.getOptimizer(name=params.optimizer,lr=params.lr), metrics=[percision_bacth], loss_weights=[0.0, 1.0,0.0]) for i in range(params.epochs): model.fit_generator(reader.getPairWiseSamples4Keras(),epochs = 1, steps_per_epoch=int(len(reader.datas["train"]["question"].unique())/reader.batch_size) ,verbose = True,callbacks=[logger.getCSVLogger()]) # for i in range(1): # model.fit_generator(reader.getPairWiseSamples4Keras(),epochs = 1,steps_per_epoch=1,verbose = True)
# Grid search: for each parameter combination, rebuild the dataset reader and
# model, train, and log the best validation accuracy.
# NOTE(review): `parameters`, `params`, `grid_parameters`, `units`, `logger`
# and `K` are defined elsewhere in this file.
for parameter in parameters:
    # old_dataset = params.dataset_name
    params.setup(zip(grid_parameters.keys(), parameter))
    import models.representation as models
    import dataset
    reader = dataset.setup(params)
    params = dataset.process_embedding(reader, params)
    qdnn = models.setup(params)
    model = qdnn.getModel()
    model.compile(loss=params.loss,
                  optimizer=units.getOptimizer(name=params.optimizer,
                                               lr=params.lr),
                  metrics=['accuracy'])
    # model.summary()
    (train_x, train_y), (test_x, test_y), (val_x, val_y) = reader.get_processed_data()
    # NOTE(review): validation_data is the *test* split; the val split is
    # only used in the final evaluate() below.
    history = model.fit(x=train_x,
                        y=train_y,
                        batch_size=params.batch_size,
                        epochs=params.epochs,
                        validation_data=(test_x, test_y),
                        verbose=False,
                        callbacks=[logger.getCSVLogger()])  # ,verbose=False
    logger.info(parameter)
    logger.info(max(history.history["val_acc"]))
    evaluation = model.evaluate(x=val_x, y=val_y)
    # Free the TF graph/session between runs to avoid memory growth.
    K.clear_session()
# test_match()
# x_input = np.asarray([b])
# y = model.predict(x_input)
# print(y)
def install_emqx(self):
    """Install the EMQ X broker from its apt repository, optionally tuning the OS.

    Steps: install prerequisites -> add the EMQ X GPG key -> add the stable
    apt repository -> list available versions -> install the chosen version.
    When a specific version is installed, the operator may accept kernel /
    limits tuning for high connection counts.

    NOTE(review): the tuning branch only runs when an explicit version was
    entered (the default-version path skips it) — confirm this is intended.
    """
    try:
        Logger.info("开始安装EMQ依赖")
        subprocess.run("sudo apt update && sudo apt install -y \
            apt-transport-https \
            ca-certificates \
            curl \
            gnupg-agent \
            software-properties-common", shell=True, check=True)
        try:
            Logger.info('添加GPG秘钥')
            subprocess.run(
                "curl -fsSL https://repos.emqx.io/gpg.pub | sudo apt-key add -",
                shell=True, check=True)
            try:
                # Register the EMQ X stable repo for this Ubuntu release.
                command = "sudo add-apt-repository \
                    \"deb [arch=amd64] https://repos.emqx.io/emqx-ce/deb/ubuntu/ \
                    ./$(lsb_release -cs) \
                    stable\""
                subprocess.run(command, shell=True, check=True)
                self.update_source_list()
                Logger.info("查询EMQ的可用版本")
                subprocess.run("sudo apt-cache madison emqx", shell=True)
                version = input("请输入需要的版本号,默认为最新版本(可直接回车)>")
                if version == '':
                    # Empty input: install the newest available package.
                    try:
                        subprocess.run("sudo apt install emqx",
                                       shell=True, check=True)
                    except subprocess.CalledProcessError:
                        Logger.error('安装EMQ失败,请重试')
                else:
                    try:
                        subprocess.run(
                            "sudo apt install emqx={}".format(version),
                            shell=True, check=True)
                        Logger.info('安装完成')
                        get_better = input("是否调优?(y or n, default is y)>")
                        # Empty input means "yes, tune".
                        if get_better == '':
                            # Raise system-wide and per-process fd limits.
                            subprocess.run(
                                "sudo sysctl -w fs.file-max=2097152",
                                shell=True)
                            subprocess.run(
                                "sudo sysctl -w fs.nr_open=2097152",
                                shell=True)
                            subprocess.run(
                                "sudo echo 2097152 > /proc/sys/fs/nr_open",
                                shell=True)
                            subprocess.run("ulimit -n 1048576", shell=True)
                            # Persist the limits across reboots.
                            with open('/etc/sysctl.conf', 'a+') as f:
                                f.writelines('\n')
                                f.writelines("fs.file-max = 1048576")
                            with open("/etc/systemd/system.conf", 'a+') as f:
                                f.writelines('\n')
                                f.writelines("DefaultLimitNOFILE=1048576")
                            with open("/etc/security/limits.conf", "a+") as f:
                                f.writelines('\n')
                                f.writelines(
                                    '* soft nofile 1048576')
                                f.writelines('\n')
                                f.writelines(
                                    '* hard nofile 1048576')
                            # TCP backlog / buffer / conntrack tuning for
                            # large numbers of concurrent MQTT connections.
                            subprocess.run(
                                'sysctl -w net.core.somaxconn=32768',
                                shell=True)
                            subprocess.run(
                                'sysctl -w net.ipv4.tcp_max_syn_backlog=16384',
                                shell=True)
                            subprocess.run(
                                'sysctl -w net.core.netdev_max_backlog=16384',
                                shell=True)
                            subprocess.run(
                                'sysctl -w net.ipv4.ip_local_port_range=\'1000 65535\'',
                                shell=True)
                            subprocess.run(
                                'sysctl -w net.core.rmem_default=262144',
                                shell=True)
                            subprocess.run(
                                'sysctl -w net.core.wmem_default=262144',
                                shell=True)
                            subprocess.run(
                                'sysctl -w net.core.rmem_max=16777216',
                                shell=True)
                            subprocess.run(
                                'sysctl -w net.core.wmem_max=16777216',
                                shell=True)
                            subprocess.run(
                                'sysctl -w net.core.optmem_max=16777216',
                                shell=True)
                            subprocess.run(
                                'sysctl -w net.ipv4.tcp_rmem=\'1024 4096 16777216\'',
                                shell=True)
                            subprocess.run(
                                'sysctl -w net.ipv4.tcp_wmem=\'1024 4096 16777216\'',
                                shell=True)
                            subprocess.run(
                                'sysctl -w net.nf_conntrack_max=1000000',
                                shell=True)
                            subprocess.run(
                                'sysctl -w net.netfilter.nf_conntrack_max=1000000',
                                shell=True)
                            subprocess.run(
                                'sysctl -w net.netfilter.nf_conntrack_tcp_timeout_time_wait=30',
                                shell=True)
                            subprocess.run(
                                'sysctl -w net.ipv4.tcp_max_tw_buckets=1048576',
                                shell=True)
                            subprocess.run(
                                'sysctl -w net.ipv4.tcp_fin_timeout=15',
                                shell=True)
                            # Raise EMQ X's own process/port limits.
                            with open('/etc/emqx/etc/emqx.conf', 'a+') as f:
                                f.writelines('\n')
                                f.writelines(
                                    'node.process_limit = 2097152')
                                f.writelines('\n')
                                f.writelines('node.max_ports = 1048576')
                            Logger.info('请手动配置TCP监听器的 Acceptor 池大小')
                            Logger.info(
                                'sudo nano /etc/emqx/etc/emqx.conf')
                            Logger.info(
                                '修改 listener.tcp.external.acceptors = 64')
                    except subprocess.CalledProcessError:
                        Logger.error('安装{}版本EMQ失败,请尝试其他版本'.format(version))
            except subprocess.CalledProcessError:
                Logger.error('添加stable仓库失败')
        except subprocess.CalledProcessError:
            Logger.error('添加秘钥失败')
    except subprocess.CalledProcessError:
        Logger.error('安装依赖失败,请重试')
def upgrade_software(self):
    """Upgrade installed apt packages, logging success or failure."""
    try:
        subprocess.run("sudo apt-get upgrade", shell=True, check=True)
    except subprocess.CalledProcessError:
        Logger.error("更新失败")
    else:
        Logger.info("更新软件完毕")
}

# Entry point: shard the hyper-parameter grid across GPUs, then run each
# combination. NOTE(review): the '}' above closes a dict (presumably
# grid_parameters) defined earlier in the file.
if __name__ == "__main__":
    # import argparse
    parser = argparse.ArgumentParser(
        description='running the complex embedding network')
    parser.add_argument('-gpu_num', action='store', dest='gpu_num',
                        help='please enter the gpu num.', default=gpu_count)
    parser.add_argument('-gpu', action='store', dest='gpu',
                        help='please enter the gpu num.', default=0)
    args = parser.parse_args()
    # Round-robin shard of the full cartesian grid for this GPU index.
    parameters = [arg for index, arg in
                  enumerate(itertools.product(*grid_parameters.values()))
                  if index % args.gpu_num == args.gpu]
    parameters = parameters[::-1]
    params = Params()
    config_file = 'config/yahoo.ini'  # define dataset in the config
    params.parse_config(config_file)
    for parameter in parameters:
        old_dataset = params.dataset_name
        params.setup(zip(grid_parameters.keys(), parameter))
        # Rebuild the reader only when the dataset actually changed.
        if old_dataset != params.dataset_name:
            print("switch %s to %s" % (old_dataset, params.dataset_name))
            reader = dataset.setup(params)
            params.reader = reader
        # params.print()
        # dir_path,logger = units.getLogger()
        # params.save(dir_path)
        history, evaluation = run(params, reader)
        logger.info("%s : %.4f " % (params.to_string(),
                                    max(history.history["val_acc"])))
        # Free the TF graph/session between runs.
        K.clear_session()