def collect_loop(args):
    """Run the CARLA data-collection loop, restarting the client on TCP errors.

    Starts a dockerised CARLA server, then repeatedly tries to connect and
    run `collect` until it succeeds. The server process and its docker
    container are ALWAYS torn down — on success, error, or Ctrl-C — via a
    single `finally` block (the original duplicated the cleanup in three
    handlers and used a bare `except:` that silently swallowed errors).
    """
    # Start the server first, outside the try: if this raises there is
    # nothing to clean up (the original would hit NameError in its handler).
    carla_process, out = open_carla(args.port, args.town_name, args.gpu,
                                    args.container_name)
    try:
        while True:
            try:
                with make_carla_client(args.host, args.port) as client:
                    collect(client, args)
                    break
            except TCPConnectionError as error:
                # Server not ready yet / transient drop: log and retry.
                logging.error(error)
                time.sleep(1)
    except KeyboardInterrupt:
        print('Killed By User')
    finally:
        # KILL CARLA TO AVOID ZOMBIES
        carla_process.kill()
        # `out` holds the container id followed by a trailing newline.
        subprocess.call(['docker', 'stop', out[:-1]])
def collect_loop(args):
    """Keep retrying `collect` against the CARLA server until it succeeds."""
    done = False
    while not done:
        try:
            with make_carla_client(args.host, args.port) as client:
                collect(client, args)
                done = True
        except TCPConnectionError as error:
            # Connection not available yet: log and retry after a pause.
            logging.error(error)
            time.sleep(1)
def __task(self):
    """Periodic agent task: refresh firewall status when enabled, then reschedule.

    Reads the agent config; if the 'state_job' flag is 'true', runs a status
    collection. Always re-registers itself via self.__execute so it keeps
    firing on the agent's schedule.
    """
    agent_config = controller.load_agent_config()
    if agent_config is None:  # was `== None` — use identity test for None
        return
    # Firewall status parsing task (runs only when enabled in config).
    # The original also checked `!= None` first, which is implied by == 'true'.
    if agent_config.get('state_job') == 'true':
        self.log.info('HOST AGENT LOAD STATUS START')
        collect.collect().get_status()
        self.log.info('HOST AGENT LOAD STATUS END')
    self.__execute(self.__task, [])
def record():
    """Record meditation session and store in database.

    Flask view (Python 2): reads the serial ``port`` from the POSTed form,
    runs hardware collection for the logged-in user, and returns a plain
    confirmation string.
    """
    print "Rainbows!"  # debug marker left in by the author
    port = request.form.get("port")
    print port  # debug: echo the submitted port
    collect(port, session["user_id"])
    # Collect.py should literally be nothing more than a bridge between hardware and the data that comes out of it.
    # College.py should return an array of states. It shouldn't even know what the session is or what the user is.
    # all it should know is given a port, return the information about the user and return it back.
    return "Successfully collected data!"
def after_obs_bok(yr, mn, dy, run=None, path=None):
    '''A shell calls ls, check, collect, and footprint. Only for xao
    Standard path is /home/primefocus/data/bss/yyyymmdd
    Args:
        yr: year
        mn: month
        dy: day
        run: run name, default is yyyymm
        path: optional, root path of the day
    '''
    # arguments default
    if run is None or run == "":
        # (dropped the unused d=dy kwarg — the format string has no {d})
        run = "{y:0>4d}{m:0>2d}".format(y=yr, m=mn)
    if path is None or path == "":
        path = "/home/primefocus/data/bss/{y:0>4d}{m:0>2d}{d:0>2d}".format(
            y=yr, m=mn, d=dy)
    # data of night: minimal site object carrying only the timezone
    class psite:
        pass
    site = psite()
    site.tz = -7
    mjd18 = common.sky.mjd_of_night(yr, mn, dy, site)
    tel = "bok"
    # files of the day
    filelist = "{tel}/obsed/{run}/files.J{day:04d}.lst".format(tel=tel, run=run, day=mjd18)
    checkfile = "{tel}/obsed/{run}/check.J{day:04d}.lst".format(tel=tel, run=run, day=mjd18)
    obsedfile = "{tel}/obsed/{run}/obsed.J{day:04d}.lst".format(tel=tel, run=run, day=mjd18)
    repfile = "{tel}/obsed/footprint/Rep_J{mjd}.txt".format(tel=tel, mjd=mjd18)
    equfile = "{tel}/obsed/footprint/Equ_J{mjd}.png".format(tel=tel, mjd=mjd18)
    galfile = "{tel}/obsed/footprint/Gal_J{mjd}.png".format(tel=tel, mjd=mjd18)
    # Bug fix: os.mkdir raised FileExistsError when re-running the same
    # night; makedirs with exist_ok also creates missing parents.
    os.makedirs("{tel}/obsed/{run}".format(tel=tel, run=run), exist_ok=True)
    # 1. call ls
    os.system("ls {path}/d*.fits > {filelist}".format(path=path, filelist=filelist))
    # 2. call check
    check(tel, yr, mn, dy, run)
    # 3. call collect
    collect(tel, yr, mn, dy, run)
    # 4. call footprint
    footprint(tel, reportfile=repfile, equfile=equfile, galfile=galfile,
              run=run, day=mjd18)
    # info
    print("Send following files to SAGE Survey group:"
          "\n\t{filelist}"
          "\n\t{checkfile}"
          "\n\t{obsedfile}"
          "\n\t{equfile}"
          "\n\t{galfile}"
          "\n\t{repfile}".format(
              filelist=filelist, checkfile=checkfile, obsedfile=obsedfile,
              equfile=equfile, galfile=galfile, repfile=repfile))
def _testunit():
    """Standalone smoke test: poll interface counters over SNMP and store them in MongoDB."""
    from collect import collect
    logging.basicConfig(level=logging.INFO)
    snmp_ip = '61.182.128.1'
    snmp_community = 'IDCHBPTT2o'
    dev_id = 'test'
    database_name = 'idc_billing'
    current_month = time.strftime("%Y%m")
    # Interface-table columns to poll from IF-MIB.
    keys = ('ifIndex', 'ifDescr', 'ifHCInOctets', 'ifHCOutOctets')
    mib_arg_list = [{'mib': 'IF-MIB', 'key': k} for k in keys]
    snmpobj = collect(snmp_ip, snmp_community)
    snmp_data = snmpobj.run(mib_arg_list)
    collections_name = '_'.join(['bill', dev_id, current_month])
    snmp_database = snmpdb('110.249.213.22')
    snmp_database.useCollections(database_name, collections_name)
    snmp_database.writeSnmpData(snmp_data, time.time())
def SFile2CFile(self, hostname, port, username, password, date):
    """Copy remote check logs for a given month (date='YYYYMM') into a local
    ./data directory and return the list of local file paths written.

    Connects over the project's collect wrapper, lists matching remote
    files, cats each one across the connection, and saves it locally.
    """
    global DATAPATH
    WORKPATH = getcwd()  # directory the script is run from
    # Bug fix: the original concatenated a literal backslash ("\data"),
    # which is an invalid "\d" escape and non-portable; path.join is both.
    DATAPATH = path.join(WORKPATH, "data")
    if not path.exists(DATAPATH):  # create the data dir on first run
        mkdir(DATAPATH)
    a = collect.collect()
    a.connect(hostname, port, username, password)
    # Find the remote log files matching the requested month.
    CMD1 = "ls ~/log/Check/*.%s*.dat" % (date)
    SFile = a.command(CMD1, "none", "notitle").split('\n')  # file list, with paths
    CFileNameTEMP = []  # local file names returned to the caller
    for i in SFile[:-1]:  # last element of the split is empty — skip it
        # i[-18:-4] extracts the bare file name from the remote path.
        CFileName = path.join(DATAPATH, i[-18:-4])
        CMD2 = "cat %s " % (i)
        SData = a.command(CMD2, "none", "notitle")
        # `with` guarantees the file is closed even if the write fails.
        with open(CFileName, 'w') as CFile:
            CFile.write(SData)
        CFileNameTEMP.append(CFileName)
    a.close()
    return CFileNameTEMP
def start(game, inputs_record):
    """Main pygame loop: pump events into the input record, step the game, draw.

    Runs until the window receives a QUIT event, then shuts pygame down.
    """
    pygame.init()
    screen = pygame.display.set_mode((draw.screen_w, draw.screen_h))
    pygame.display.set_caption("ice-hall")
    clock = pygame.time.Clock()
    millis = 1  # elapsed ms of the previous frame; seeds the first update
    quitting = False  # renamed from `exit`, which shadowed the builtin
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                quitting = True
            else:
                inputs_record = collect.collect(inputs_record, event)
        if quitting:
            break
        # Advance simulation by the previous frame's duration in seconds.
        game = update.update(game, inputs_record, millis/1000)
        draw.draw(screen, game)
        pygame.display.flip()
        millis = clock.tick(60)  # cap at 60 FPS; returns elapsed ms
    pygame.quit()
def reloadData(self, prefix, dataSetFullName):
    """Load a new 4-D data set into the canvas and reset the time slider."""
    self.prefix = prefix
    # Display name is the last path component of the data set.
    self.dataName = dataSetFullName.rsplit('/')[-1]
    self.sc.data4d = collect(dataSetFullName, prefix=self.prefix)
    # Slider range covers every time index; jump back to t = 0.
    last_time_index = self.sc.data4d.shape[0] - 1
    self.sp.setMaximum(last_time_index)
    self.sp.setValue(0)
    self.xy_plane_selected()
def __init__(self, parent=None):
    """Build the 1-D plot widget: figure tab, data table tab, time slider,
    and save buttons, seeded with the averaged electric potential field."""
    super(PlotWidget1D, self).__init__()
    self.parent = parent
    # data4d has four dimensions, the first is time
    self.prefix = 'data'
    self.data4d = collect("/Fields/", "Phi_global_avg", prefix=self.prefix)
    self.dataName = "electric potential"
    # Let the widget expand with the window in both directions.
    sizePolicy = QSizePolicy()
    sizePolicy.setHorizontalPolicy(QSizePolicy.Expanding)
    sizePolicy.setVerticalPolicy(QSizePolicy.Expanding)
    self.setSizePolicy(sizePolicy)
    self.plotVboxlayout = QVBoxLayout(self)
    self.tab_widget = QTabWidget(self)
    self.plotVboxlayout.addWidget(self.tab_widget)
    # matplotlib figure tab
    self.sc = MyStaticMplCanvas1D(self, self.data4d, title=self.dataName)
    self.sc.draw()
    self.tab_widget.addTab(self.sc, "Figures")
    # data tab: table view of the t=0 slice
    self.data_widget = QTableWidget(self)
    self.set_data_widget(self.data4d[0, 0, :, :])
    self.tab_widget.addTab(self.data_widget, "data")
    #> The slider (selects the time index into data4d)
    self.sp_widget = QWidget(self)
    self.sp_layout = QHBoxLayout(self.sp_widget)
    self.label = QLabel("time")
    self.plainTextEdit = QTextEdit("0")
    self.plainTextEdit.setMaximumHeight(20)
    self.plainTextEdit.setMaximumWidth(100)
    self.sp = QSlider(QtCore.Qt.Horizontal)
    self.sp.setMinimum(0)
    # Slider range covers every time index in the data set.
    self.sp.setMaximum(self.data4d.shape[0] - 1)
    self.sp.setTickPosition(QSlider.TicksBelow)
    self.sp.setTickInterval(1)
    self.sp.valueChanged.connect(self.timeChange)
    self.sp_layout.addWidget(self.label)
    self.sp_layout.addWidget(self.plainTextEdit)
    self.sp_layout.addWidget(self.sp)
    # Save-figure / save-animation buttons.
    self.save_widget = QWidget(self)
    self.save_layout = QHBoxLayout(self.save_widget)
    self.button_saveFig = QPushButton("Save Figure")
    self.button_saveAnimation = QPushButton("Save Animation")
    self.save_layout.addWidget(self.button_saveFig)
    self.save_layout.addWidget(self.button_saveAnimation)
    self.button_saveAnimation.clicked.connect(self.save_animation)
    self.button_saveFig.clicked.connect(self.save_fig)
    #self.plotVboxlayout.addWidget(self.sc)
    self.plotVboxlayout.addWidget(self.sp_widget)
    self.plotVboxlayout.addWidget(self.save_widget)
def test_run():
    """One suggestions-service GET/200 log line yields two counters and one timer."""
    logs = dict(collect())
    line = '{"time": 123456, "time_elapsed": 12, "status": 200, "method_name": "GET", "blob": "foo"}'
    metrics = list(logs['suggestions_service'].process(line))
    counters = [m for m in metrics if m.type == MetricType.COUNTER]
    timers = [m for m in metrics if m.type == MetricType.TIMER]
    assert len(metrics) == 3
    assert len(counters) == 2
    assert len(timers) == 1
def test_run():
    """The 'suggest' log config turns one GET/200 line into 2 counters + 1 timer."""
    matches = [(l, t) for l, t in collect().items() if l.name == "suggest"]
    log, triggers = matches[0]
    line = '{"time": 123456, "time_elapsed": 12, "status": 200, "method_name": "GET", "blob": "foo"}'
    metrics = list(process(log, triggers, line))
    # Booleans sum as 0/1, so these are counts by metric type.
    counter_count = sum(m.type == MetricType.COUNTER for m in metrics)
    timer_count = sum(m.type == MetricType.TIMER for m in metrics)
    assert len(metrics) == 3
    assert counter_count == 2
    assert timer_count == 1
def main():
    """CLI entry point.

    `init [mode]` scaffolds via modeSelection; `collect` reads config.json
    from the current directory and runs the collection step.
    """
    if len(sys.argv) <= 1:
        return  # no subcommand given
    currentPath = os.getcwd()
    command = sys.argv[1]
    if command == 'init':
        # Optional third argument selects the mode; None means default.
        modeSelection(currentPath, sys.argv[2] if len(sys.argv) > 2 else None)
    elif command == 'collect':
        # Bug fix: `with` guarantees the config file is closed even when
        # json parsing raises (the original leaked the handle on error).
        with open(os.path.join(currentPath, 'config.json')) as conf:
            path = json.loads(conf.readline())
        mode = path['mode']
        src = [path['src'], path['mainSrc']]
        dist = [path['dist'], path['mainDist']]
        collect(mode, src, dist)
def loop1():
    """Collect SRIs for the first 1/14th of `teams` and pickle the results."""
    # Hoist the slice bound — the original computed int((1/14)*len(teams))
    # twice, once for the bar max and once for the slice.
    cutoff = int((1/14)*len(teams))
    bar = Bar('Collecting SRIs', max=cutoff)
    for team in teams[:cutoff]:
        data1[team] = collect(team)
        # print(data)
        bar.next()
    with open('sri/data1.pkl', 'wb') as handle:
        pickle.dump(data1, handle, protocol=pickle.HIGHEST_PROTOCOL)
    bar.finish()
def reloadData(self, prefix, dataSetFullName):
    """Reload the 4-D data set, redraw the figure, and reset table + slider."""
    self.prefix = prefix
    # Display name is the last path component.
    self.dataName = dataSetFullName.rsplit('/')[-1]
    new_data = collect(dataSetFullName, prefix=self.prefix)
    self.data4d = new_data
    self.sc.compute_initial_figure(new_data, self.dataName)
    self.sc.draw()
    # Show the t=0 slice and rewind the time slider.
    self.set_data_widget(new_data[0, 0, :, :])
    self.sp.setMaximum(new_data.shape[0] - 1)
    self.sp.setValue(0)
def main():
    """\
    Script for producing a plot of the forward hopping of protons from a
    RAPTOR output (evb.out). This is only tested for the case of a dissolved
    proton (e.g. H3O in a box of nH2O).

    CHANGELOG

    9-20-2013 DWS v1.0
    Initial build. Currently the code outputs data to screen or plot it.
    """
    from argparse import ArgumentParser, RawDescriptionHelpFormatter
    from textwrap import dedent
    parser = ArgumentParser(description=dedent(main.__doc__),
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('--version', action='version', version='%(prog)s 1.0')
    parser.add_argument('files', help='The files that you want converted.',
                        nargs='+')
    args = parser.parse_args()

    # Store command line arguments as convenient variables.
    # Only the first file is processed — TODO confirm extra files are ignored
    # on purpose.
    fh = args.files[0]

    # Collect EVB simulation data
    evb_data = collect(fh)

    # Get the timestep and rxncenters from an MS-EVB simulation
    ts = evb_data['TIMESTEP']
    rxncenter = [i[1] for i in evb_data['RXNCENTER']]
    hopfxn = eval_hop_function(ts, rxncenter)

    # Plot the data
    plt.rc('text', usetex=True)
    plt.rc('font', **{'family':'serif', 'serif': ['Times'], 'size': 30})
    fig = plt.figure()
    # NOTE(review): divides by 1000 — presumably the timestep is in fs and
    # the axis in ps; confirm against the RAPTOR output units.
    x = array(ts)/1000.0
    y = hopfxn
    sub = fig.add_subplot(111)
    sub.plot(x, y, 'b', linewidth=3)
    # Adjust graph scale
    fig.subplots_adjust(left=0.12, right=0.90, bottom=0.1, top=0.9)
    # Title, labels
    sub.set_xlabel(r'time (ps)')
    sub.set_ylabel(r'Forward Hop')
    # Axis limits
    sub.set_xlim([0,x[-1]])
    #sub.set_xlim([1300,1600])
    sub.set_ylim([0,y[-1]])
    #sub.set_ylim([480,610])
    plt.show()
def scan(self):
    """Run a motor scan: configure exposure and optics, precompute positions,
    open shutters, execute the linearized scan, then restore shutter/exposure.

    observable is dictionary which contains three entries: the 'device'
    refers to the device through which we access sensors, 'attribute'
    referring list of attributes to record and 'economy' that is intended
    for multidimensional measurements to indicate whether full measurement
    or just some global value (like e.g. mean) should be stored.
    """
    self.collect = Collect.collect()  # needed only for opening and closing safety shutter
    self.collect.test = False
    #self.setPhase(2) # set to locate
    if self.aperture_index is not None:
        self.set_aperture(self.aperture_index)
    self.wait(self.motor_device)
    # Validate the scan geometry before touching any hardware.
    self.checkSteps()
    self.checkLengths()
    self.checkNbSteps()
    self.setExposure(self.exposure)  #0.05 for 8 bunch mode; 0.25 for 1 bunch mode, otherwise 0.005
    startTransmission = self.transmission()  # remembering starting transmission, we will put it back after the scan
    self.setFP()
    #while self.collect.mono_mt_rx.state().name != 'OFF':
    #self.collect.safeTurnOff(self.collect.mono_mt_rx)
    #time.sleep(0.1)
    #self.collect.setEnergy(12.65)
    #self.transmission(85)
    self.setZoom(10)
    self.putScannedObjectInBeam()
    # center will contain current values of the scanned object
    self.center = [self.motor_device.read_attribute(self.shortFull[motor]).value
                   for motor in self.motors]
    # precalculating all the measurement positions
    self.calculatePositions()
    self.collect.openSafetyShutter()
    self.wait(self.motor_device)
    self.motor_device.fastshutterisopen = True  #OpenFastShutter()
    # Front light off so it does not contaminate the measurement.
    self.motor_device.write_attribute('frontlightlevel', 0)
    self.motor_device.write_attribute('frontlightison', False)
    self.linearizedScan()
    self.duration = time.time() - self.start
    self.motor_device.fastshutterisopen = False  #CloseFastShutter()
    #self.collect.mono_mt_rx.On()
    #self.collect.closeSafetyShutter()
    # NOTE(review): exposure is reset to a fixed 0.050, not the pre-scan
    # value; transmission restore is commented out — confirm intended.
    self.setExposure(0.050)
    #self.transmission(startTransmission)
    self.putScannedObjectInBeam()
def snmprun_process(self, args):
    """Poll one device over SNMP and write the result to every configured
    MongoDB backup instance.

    args: dict with 'snmp_ip', 'snmp_community', 'db_name', 'table_name',
    'dev_id' keys. A failure on one backup DB is logged and does not stop
    writes to the others.
    """
    snmpobj = collect(args['snmp_ip'], args['snmp_community'])
    snmp_data = snmpobj.run(snmpConfig.snmp_mib)
    current_time = time.time()  # one timestamp shared by all backups
    # write multiple database for backup
    for database in snmpConfig.mongo_db_list:
        mongo_db_ip = database['ip']
        mongo_db_port = database['port']
        try:
            snmp_database = snmpdb(mongo_db_ip, mongo_db_port)
            snmp_database.useCollections(args['db_name'], args['table_name'])
            snmp_database.writeSnmpData(snmp_data, current_time)
        except Exception:
            # Bug fix: the original logged the undefined name `ip_address`,
            # which raised NameError inside the handler and masked the
            # real failure. Also narrowed the bare except to Exception.
            logging.error("Write to database %s error" % mongo_db_ip)
    logging.debug("Process dev: %s ip: %s pid: %s complete" %
                  (args['dev_id'], args['snmp_ip'], str(os.getpid())))
def scan(self):
    """Run a motor scan end-to-end: set up exposure/optics, precompute the
    measurement positions, open shutters, run the linearized scan, and
    restore the shutter and exposure afterwards.

    observable is dictionary which contains three entries: the 'device'
    refers to the device through which we access sensors, 'attribute'
    referring list of attributes to record and 'economy' that is intended
    for multidimensional measurements to indicate whether full measurement
    or just some global value (like e.g. mean) should be stored.
    """
    self.collect = Collect.collect()  # needed only for opening and closing safety shutter
    self.collect.test = False
    #self.setPhase(2) # set to locate
    if self.aperture_index is not None:
        self.set_aperture(self.aperture_index)
    self.wait(self.motor_device)
    # Sanity-check scan geometry before moving hardware.
    self.checkSteps()
    self.checkLengths()
    self.checkNbSteps()
    self.setExposure(self.exposure)  #0.05 for 8 bunch mode; 0.25 for 1 bunch mode, otherwise 0.005
    startTransmission = self.transmission()  # remembering starting transmission, we will put it back after the scan
    self.setFP()
    #while self.collect.mono_mt_rx.state().name != 'OFF':
    #self.collect.safeTurnOff(self.collect.mono_mt_rx)
    #time.sleep(0.1)
    #self.collect.setEnergy(12.65)
    #self.transmission(85)
    self.setZoom(10)
    self.putScannedObjectInBeam()
    # center will contain current values of the scanned object
    self.center = [self.motor_device.read_attribute(self.shortFull[motor]).value
                   for motor in self.motors]
    # precalculating all the measurement positions
    self.calculatePositions()
    self.collect.openSafetyShutter()
    self.wait(self.motor_device)
    self.motor_device.fastshutterisopen = True  #OpenFastShutter()
    # Front light off during the measurement.
    self.motor_device.write_attribute('frontlightlevel', 0)
    self.motor_device.write_attribute('frontlightison', False)
    self.linearizedScan()
    self.duration = time.time() - self.start
    self.motor_device.fastshutterisopen = False  #CloseFastShutter()
    #self.collect.mono_mt_rx.On()
    #self.collect.closeSafetyShutter()
    # NOTE(review): exposure resets to a fixed 0.050 rather than the
    # pre-scan value; transmission restore stays commented out — confirm.
    self.setExposure(0.050)
    #self.transmission(startTransmission)
    self.putScannedObjectInBeam()
def getDeviceInfo(self, snmp_ip, snmp_community, udp_port=161):
    """Query basic device info via snmpget and cache the connection on success.

    Returns True when the device answered, False otherwise.
    """
    snmpobj = collect(snmp_ip, snmp_community, udp_port)
    device_info = snmpobj.run(self.deviceInfoMIB, 'snmpget')
    # Guard clause: bail out early on a failed query.
    if device_info is None:
        logging.error("ERROR! Get device snmp from %s:%s with %s error!" % (snmp_ip, udp_port, snmp_community))
        return False
    # Remember the working connection parameters for later polls.
    self.snmpobj = snmpobj
    self.snmp_ip = snmp_ip
    self.snmp_community = snmp_community
    self.udp_port = udp_port
    self.device_info = self.parseDeviceInfo(device_info)
    logging.info("Get device snmp ip %s sys name %s " % (snmp_ip, self.device_info['sysName']))
    return True
def handle(self):
    """Serve one agent control connection.

    Reads framed requests in a loop, dispatches on (tasktype, type) —
    collect/{rules,agent,status,sysinfo} or control/{rule,preview} —
    and replies with an ndb-serialized 'root' node.
    """
    addr = self.request.getpeername()
    log.info('Got a connection from {0}'.format(str(addr)))
    while True:
        data = self.request.recv(socketclient.SOCKET_BUFFER_SIZE).strip()
        if not data:
            break  # peer closed the connection
        log.info('receive from ({0}):\n{1}'.format(self.client_address, data))
        data_dict = ndb.load_string(data)
        taskinfo = data_dict.get('root')
        if taskinfo is None:  # was `== None` — identity test for None
            self.request.send('task command error'.encode('utf8'))
            return
        task_type = taskinfo.get('tasktype')
        _type = taskinfo.get('type')
        task_id = taskinfo.get('taskid')
        send_data = {'error':'error'}  # default reply if no branch matches
        if task_type =='collect' and _type == 'rules':
            data_dict['datas'] = config_cache.load_rules()
            send_data = ndb.build_node('root', data_dict)
        elif task_type =='collect' and _type == 'agent':
            data_dict['datas'] = controller.load_agent_config()
            send_data = ndb.build_node('root', data_dict)
        elif task_type =='collect' and _type == 'status':
            data_dict['datas'] = collect.collect().load_status()
        elif task_type =='collect' and _type == 'sysinfo':
            data_dict['datas'] = config_cache.load_sysinfo()
        elif task_type =='control' and _type == 'rule':
            datas = taskinfo.get('datas')
            data_dict['datas'] = task_manage.execute_job(task_id, datas)
        elif task_type =='control' and _type == 'preview':
            datas = taskinfo.get('datas')
            data_dict['datas'] = task_manage.preview_job(datas)
        # Serialize the (possibly updated) payload; branches that already
        # built send_data are rebuilt identically here.
        send_data = ndb.build_node('root', data_dict)
        self.request.send(send_data.encode('utf8'))
def _testunit():
    """Smoke test: read IF-MIB interface counters via SNMP and persist to MongoDB."""
    from collect import collect
    logging.basicConfig(level=logging.INFO)
    snmp_ip = '61.182.128.1'
    snmp_community = 'IDCHBPTT2o'
    dev_id = 'test'
    database_name = 'idc_billing'
    current_month = time.strftime("%Y%m")
    # Build the MIB request list from the column names we need.
    wanted = ('ifIndex', 'ifDescr', 'ifHCInOctets', 'ifHCOutOctets')
    mib_arg_list = [{'mib': 'IF-MIB', 'key': key} for key in wanted]
    snmpobj = collect(snmp_ip, snmp_community)
    snmp_data = snmpobj.run(mib_arg_list)
    collections_name = '_'.join(['bill', dev_id, current_month])
    snmp_database = snmpdb('110.249.213.22')
    snmp_database.useCollections(database_name, collections_name)
    snmp_database.writeSnmpData(snmp_data, time.time())
try: # user menu selection = str( input('Input option:\n' ' (c: collect)\n' ' (con: continue_loading)\n' ' (r: output stored dates\n' ' (p: plot)\n' ' (com: common words)\n' ' (u: update)\n' ' (w: select common word to plot)\n' '>>> ')) if selection == 'c': # collect tweets. For loading most recent tweets into set file collect.collect(read_file, user, int(input('Input number of tweets to collect: '))) elif selection == 'con': # continue loading tweets for a selected user (from the oldest tweet stored on) lastID = int(read.read(read_file)[-1][0]) num_collect = str( input('Default collect num? Default runs to rate limit (y/n): ')) if num_collect == 'y': # input parsing collect.continue_loading(lastID, read_file, user) elif num_collect == 'n': collect.continue_loading(lastID, read_file, user, int(input('Num tweets to collect: '))) else: print('Invalid input. Rerun and try again...') elif selection == 'p': # plots the 10 most commonly used words for specified user. Option to change the moving average size avg_len = 7
def main():
    """\
    Script for producing a plot of the a probability density function from
    the CI coefficients of a RAPTOR output (evb.out). This mimics the
    concept proposed in the MS-EVB3 paper.

    CHANGELOG

    9-20-2013 DWS v1.0
    Initial build. Currently the code outputs data to screen or plot it.
    """
    from argparse import ArgumentParser, RawDescriptionHelpFormatter
    from textwrap import dedent
    parser = ArgumentParser(description=dedent(main.__doc__),
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('--version', action='version', version='%(prog)s 1.0')
    # NOTE(review): no type=int here, so a -b given on the command line
    # arrives as a str; only the default 200 is an int — confirm intended.
    parser.add_argument('-b', '--bins',
                        help='Number of bins for generating plot.',
                        default=200)
    parser.add_argument('-f', '--freeenergy',
                        help='Use the probability density function '
                        'to generate a free energy profile from the largest MS-EVB amplitude.',
                        action='store_true', default=False)
    parser.add_argument('-fd', '--freeenergydiff',
                        help='Use the probability density function '
                        'difference between the largest and second largest MS-EVB amplitude '
                        'to generate a free energy profile.',
                        action='store_true', default=False)
    parser.add_argument('files', help='The files that you want converted.',
                        nargs='+')
    args = parser.parse_args()

    # Store command line arguments as convenient variables
    fh = args.files[0]
    nbins = args.bins
    genfe = args.freeenergy
    genfed = args.freeenergydiff

    # Collect EVB simulation data
    evb_data = collect(fh)

    # Get the largest (first) and second largest (second) CI vector
    # from an MS-EVB simulation
    tmp = [i[1] for i in evb_data['CI_VECTOR']]
    first = array([])
    second = array([])
    for i in tmp:
        l = max(i)
        l2 = second_largest(i)
        # Note that we store the squared CI coefficient
        first = append(first, l*l)
        second = append(second, l2*l2)

    # Generate probability densities. Here we use the kernel density estimator
    # to give a smooth PDF.
    pdf1 = gaussian_kde(first)
    #bin_pdf1 = linspace( min(first), max(first), nbins)
    bin_pdf1 = linspace( 0, 1, nbins )
    pdf2 = gaussian_kde(second)
    #bin_pdf2 = linspace( min(second), max(second), nbins)
    bin_pdf2 = linspace( 0, 1, nbins )
    if genfed:
        # Densities of the amplitude differences, both signs.
        pdf3 = gaussian_kde(first-second)
        pdf4 = gaussian_kde(second-first)
        bin_pdf3 = linspace( -1, 1, 2*nbins )

    # If requested, generate the free energy corresponding to the probability
    # density
    if genfe or genfed:
        free_energy = array([])
        kb = 1.3806488e-23 # J/K
        # 1 kcal/mol = 6.9477e-21 J
        kb_kcal = kb / 6.9477e-21
        if genfe:
            # F = -kT ln p at T = 300 K, in kcal/mol.
            for i in bin_pdf1:
                val = -kb_kcal*300.0*log(pdf1(i))
                free_energy = append(free_energy, val)
        else:
            # Near zero the difference density is noisy; clamp |x|<=0.04 to 0.
            for i in bin_pdf3:
                if i < -0.04:
                    val = -kb_kcal*300.0*log(pdf4(i))
                elif i > 0.04:
                    val = -kb_kcal*300.0*log(pdf3(i))
                else:
                    val = 0.0
                free_energy = append(free_energy, val)
        # Shift the zero point on the graph.
        minfe = min(free_energy)
        free_energy += -minfe

    # Plot the data
    plt.rc('text', usetex=True)
    plt.rc('font', **{'family':'serif', 'serif': ['Times'], 'size': 30})
    fig = plt.figure()
    sub = fig.add_subplot(111)
    if genfe:
        sub.plot(bin_pdf1, free_energy, 'b', linewidth=3)
    elif genfed:
        sub.plot(bin_pdf3, free_energy, 'b', linewidth=3)
    else:
        sub.plot(bin_pdf1, pdf1(bin_pdf1), 'b', linewidth=3)
        sub.plot(bin_pdf2, pdf2(bin_pdf2), 'r', linewidth=3)
    # Adjust graph scale
    fig.subplots_adjust(left=0.12, right=0.90, bottom=0.1, top=0.9)
    # Title, labels
    if genfe:
        sub.set_xlabel(r'c$_1^2$')
        sub.set_ylabel(r'Free Energy (kcal/mol)')
    elif genfed:
        sub.set_xlabel(r'c$_1^2$-c$_2^2$')
        sub.set_ylabel(r'Free Energy (kcal/mol)')
    else:
        sub.set_xlabel(r'c$_i^2$')
        sub.set_ylabel(r'Probability Density')
    # Axis limits
    if genfe:
        sub.set_xlim([0.35,0.85])
        sub.set_ylim([min(free_energy),3])
    elif genfed:
        sub.set_xlim([-0.80,0.80])
        sub.set_ylim([min(free_energy),3])
    else:
        sub.set_xlim([0,0.85])
        sub.set_ylim([0,8])
    # Minor tick marks
    if genfed:
        xminor = MultipleLocator(0.05)
    else:
        xminor = MultipleLocator(0.01)
    yminor = MultipleLocator(0.1)
    sub.xaxis.set_minor_locator(xminor)
    sub.yaxis.set_minor_locator(yminor)
    plt.show()
# Parse the optional time-step argument; fall back to `itime` when absent.
if len(sys.argv) == 2:
    t = int(sys.argv[1])
elif len(sys.argv) > 2:
    # Bug fix: the original only printed the error and fell through,
    # leaving `t` unbound and crashing later with NameError.
    print("error: should have one argument, as time step")
    sys.exit(1)
else:
    t = itime

# Reference scales for normalisation — presumably density (m^-3) and
# electric field (V/m); TODO confirm against the simulation setup.
n0 = 1.0e19
Ex0 = 1.0e6
x_step = 20
level_num = 20
amplification_factor = 80.0

# Load the averaged potential just to learn the grid size (4th axis = x).
data_temp = collect("/Fields/", "Phi_global_avg")
nx = data_temp.shape[3]
print("data shape: ", data_temp.shape)

dx = 0.5e-5  # unit (m)
x = np.linspace(0, nx * dx, nx)
x = x * amplification_factor
x_less = x[::x_step]  # down-sampled abscissa for sparse markers
xmin = x.min()
xmax = x.max()

x0 = int(nx / 2)
x1 = 100
ix = 3
# Heat-flux accumulators for the left/right walls of each geometry variant.
heat_flux_list0_right = []
heat_flux_list1_left = []
heat_flux_list1_right = []

# One data directory per bevel height h = 0..4, with matching plot labels.
path_list = [
    "ref/data", "ref_bevel1/data", "ref_bevel2/data", "ref_bevel3/data",
    "ref_bevel4/data"
]
labels = [
    r"$\mathrm{h}\ =\ 0$", r"$\mathrm{h}\ =\ 1$", r"$\mathrm{h}\ =\ 2$",
    r"$\mathrm{h}\ =\ 3$", r"$\mathrm{h}\ =\ 4$"
]

for path in path_list:
    # read segment information
    length = collect("length", prefix="grid", path=path)
    length = length[0, :]
    # scale by 1e3 — presumably m -> mm for plotting; TODO confirm units
    length = length * 1.0e3
    n_segments = collect("n_segments", prefix="grid", path=path)
    n_segments = n_segments[0, :]
    print(n_segments.shape, type(n_segments[0]), n_segments[0])
    # Segment-index ranges for the left wall (segments 0-1) and the right
    # wall (segments 3-4); segment 2 is the gap between them.
    nx_start_left = 0
    nx_end_left = n_segments[0] + n_segments[1]
    nx_start_right = nx_end_left + n_segments[2]
    nx_end_right = nx_start_right + n_segments[3] + n_segments[4]
    # Running x-origins for accumulating segment lengths.
    x0_left = 0.0
    x0_right = 0.0
def main_opts(parser, options, args):
    """Dispatch a single eGauge CLI action.

    args[0] is the action name (must be in the module-level `actions`),
    args[1] the device URL. Python 2 module: print statements throughout.
    """
    if len(args) < 2:
        parser.print_help()
        exit(2)
    action = args[0]
    device_url = args[1]
    if action not in actions:
        print "unknown", action
        parser.print_help()
        exit(2)
    # Device client built from CLI credentials.
    eg = egcfg(device_url, options.username, options.password, logger)
    eg.timeout = int(options.timeout)
    pushInterval = None
    retval = 0
    if options.pushInterval:
        pushInterval = int(options.pushInterval)
    if action == "register":
        eg.register(options.pushURI, pushInterval, options.seconds)
    if action == "de-register":
        # Registering an empty URI de-registers the device.
        eg.register("", pushInterval, options.seconds)
    if action == "channelchecker":
        eg.channelchecker(int(options.samples))
    elif action == "reboot":
        eg.reboot()
    elif action == "upgrade":
        eg.upgrade(options.branch)
    elif action == "upgrade-kernel":
        eg.upgrade_kernel()
    elif action == "netconfig":
        eg.netconfig()
    elif action == "status":
        eg.status()
    elif action == "getntp":
        eg.getntp()
    elif action == "is-caught-up":
        ok, dt, data = eg.current_readings()
        if not ok:
            retval = -1  # signal "not caught up" through the exit code
    elif action == "get":
        eg.get(options.path)
    elif action == "getpushstatus":
        eg.getpushstatus()
    elif action == "getconfig":
        eg.getcfg(options.cfgfile)
    elif action == "setconfig":
        eg.setcfg(options.cfgfile)
    elif action == "wait":
        eg.wait()
    elif action == "setntp":
        if options.ntpServer is None:
            print "ntpServer is required for setntp"
            exit(2)
        eg.setntp(options.ntpServer)
    elif action == "setpassword":
        if options.new_password is None:
            print "new-passsword is required for setpassword"
            exit(2)
        eg.setpassword(options.new_password)
    elif action == "getregisters":
        eg.getregisters(options.cfgfile, version=options.version)
    elif action == "setregisters":
        eg.setregisters(options.cfgfile, options.skip_backup, version=options.version)
    elif action == "rotate-voltage-config":
        eg.rotate_voltage_cofig()
    elif action == "auto-phase-match":
        import egauge_auto_config
        data = egauge_auto_config.auto_phase_match(eg, options.samples, options.restore)
    elif action == "collect-data":
        if options.datafile is None:
            print "datafile is a required arg"
            exit(2)
        elif abs(datetime.utcnow() - eg.getegaugetime()) > timedelta(minutes=5):
            # Refuse to collect when device clock drifts more than 5 minutes.
            print "Time on egauge is out of sync with the real world!!!"
            exit(2)
        else:
            # disable reporting when starting data collection
            eg.register("", pushInterval, options.seconds)
            import imp
            import collect
            import scheduler
            # A custom scheduler module/class can be injected via options;
            # otherwise fall back to the bundled Null/Local schedulers.
            if hasattr(options, "scheduler_module") and hasattr(options, "scheduler_class"):
                scheduler = getattr(imp.load_source("sched", options.scheduler_module), options.scheduler_class)()
                collect.collect(eg, options, scheduler)
            else:
                options.scheduler_module = os.path.realpath(scheduler.__file__).replace(".pyc", ".py")
                if options.scheduler_type is None:
                    options.scheduler_class = scheduler.SchedulerNull.__name__
                    collect.collect(eg, options, scheduler.SchedulerNull())
                elif options.scheduler_type == "local":
                    options.scheduler_class = scheduler.SchedulerLocal.__name__
                    collect.collect(eg, options, scheduler.SchedulerLocal())
                else:
                    print "Invalid scheduler"
                    exit(2)
    elif action == "analyze-phase":
        if options.datafile is None or options.mappingfile is None:
            print "datafile and mappingfile are required args"
            exit(2)
        if options.ctgroups is not None:
            options.ctgroups = json.loads(options.ctgroups)
        analyze_phase_fn(options.datafile, options.mappingfile, options.ctgroups)
    # Callers can suppress the process exit by setting options.exit = False.
    if hasattr(options, "exit") is False or options.exit is True:
        exit(retval)
from stop_words import get_stop_words
from collect import collect, remove_common_words

# Hyper-parameters for the later vectorisation / decomposition steps.
n_components = 1000
n_features = 10000
random_state = 1
target_names = ['pr', 'po', 'au']

# Load cached board texts if present; otherwise scrape and cache them.
try:
    with open('boards/data.json', 'r', encoding='utf8') as f:
        data = load(f)
except FileNotFoundError:
    d = collect(target_names, 100, random_state)
    data = []
    for k in d:
        for e in d[k]:
            data.append(remove_common_words(e))
    with open('boards/data.json', 'w+', encoding='utf8') as f:
        dump(data, f, ensure_ascii=False)

# Keep the pre-shuffle order so indices can be mapped back afterwards.
data2 = data.copy()
seed(random_state)
shuffle(data)
# inverse_index[i] maps shuffled position i back to the original index.
# NOTE(review): list.index returns the FIRST match, so duplicate documents
# all map to one original index, and each lookup is O(n) — confirm intended.
inverse_index = []
for i in range(len(data)):
    inverse_index.append(data2.index(data[i]))
# -*- coding: utf-8 -*- """ Created on Mon Jul 13 10:52:59 2015 @author: JackChen """ import convert import collect import capture convert.convertxlsx2csv() capture.do_changeFormat() collect.collect()
def loop12():
    """Collect SRIs for the 12th fourteenth of `teams` and pickle them."""
    n = len(teams)
    lo = int((11/14) * n)
    hi = int((12/14) * n)
    for team in teams[lo:hi]:
        data12[team] = collect(team)
    with open('sri/data12.pkl', 'wb') as handle:
        pickle.dump(data12, handle, protocol=pickle.HIGHEST_PROTOCOL)
import pandas as pd
from collect import collect
from progress.bar import Bar

# Team numbers previously scraped into a pickled DataFrame.
df = pd.read_pickle("team_dataframes/teamList.pkl")
teamList = df["Team Number: "].tolist()

# page = Raschietto.from_url("https://vexdb.io/events/view/RE-VRC-17-4462")
# teamNums = Matcher('.number')
# teamList = teamNums(page, multiple=True)

# Collect per-team statistics with a progress bar.
teamStats = []
bar = Bar('Collecting Team Data', max=len(teamList))
for team in teamList:
    teamStats.append(collect(team))
    bar.next()
bar.finish()

# Scrape the event results page; each '.result-box' is one match result.
page = Raschietto.from_url(
    "https://vexdb.io/events/view/RE-VRC-17-4462?t=results")
results = Matcher('.result-box')
results = results(page, multiple=True)

# Accumulators for match scores and alliance members, filled later.
blueScores = []
blueAlliance1 = []
blueAlliance2 = []
redScores = []
redAlliance1 = []
redAlliance2 = []
def main():
    """\
    Script for putting coordinates from an XYZ file into a template LAMMPS
    data file. This works by taking the coordinates from the XYZ file and
    transplanting them into the already made LAMMPS data file.
    """
    from argparse import ArgumentParser, RawDescriptionHelpFormatter
    from textwrap import dedent
    parser = ArgumentParser(description=dedent(main.__doc__),
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('--version', action='version', version='%(prog)s 1.0')
    parser.add_argument('-t', '--template', help='Template LAMMPS data file.',
                        required=True)
    parser.add_argument('files', help='The XYZ file(s).', nargs='+')
    args = parser.parse_args()

    # Store command line arguments as convenient variables
    fh = args.files
    template = args.template

    # Read template LAMMPS data file into memory (closed via `with`).
    with open(template) as fl:
        tf = tuple([line.rstrip() for line in fl])
    natoms = int(tf[2].split()[0])  # Number of atoms
    nlines = len(tf)                # Number of lines

    # Determine where to insert coordinates in the template file
    for i, ln in enumerate(tf):
        if 'Atoms' in ln:
            s = i + 2
            e = s + natoms
            break
    else:
        # Bug fix: the original left s/e unbound and crashed with a
        # confusing NameError when the template had no Atoms section.
        raise ValueError("No 'Atoms' section found in template: " + template)

    fmtln = '{0:>4} {1:>4} {2:>2} {3:>12} {4:11.4e} {5:11.4e} {6:11.4e}'

    # Add the coordinates to the template file for each XYZ file
    for f in fh:
        xyz = collect(f)
        coords = xyz['COORDS']
        fname = f.split('.')[0] + '.data'
        # `with` closes the output even if a malformed line raises mid-loop.
        with open(fname, 'w') as fl:
            # Print header
            for i in range(s):
                print(tf[i], file=fl)
            # Print atom block: ids/types come from the template, the
            # coordinates from the XYZ file, consumed in order via j.
            j = 0
            for i in range(s, e):
                tmp = tf[i].split()
                print(fmtln.format(tmp[0], tmp[1], tmp[2], tmp[3],
                                   coords[j][0], coords[j][1], coords[j][2]),
                      file=fl)
                j += 1
            # (stray debug `print(j)` to stdout removed)
            # Print end
            for i in range(e, nlines):
                print(tf[i], file=fl)
from collect import collect, remove_common_words

# Hyper-parameters for the later vectorisation / decomposition steps.
n_components = 1000
n_features = 10000
random_state = 1
target_names = ['pr', 'po', 'au']

# Load cached board texts if present; otherwise scrape and cache them.
try:
    with open('boards/data.json', 'r', encoding='utf8') as f:
        data = load(f)
except FileNotFoundError:
    d = collect(target_names, 100, random_state)
    data = []
    for k in d:
        for e in d[k]:
            data.append(remove_common_words(e))
    with open('boards/data.json', 'w+', encoding='utf8') as f:
        dump(data, f, ensure_ascii=False)

# Keep the pre-shuffle order so indices can be mapped back afterwards.
data2 = data.copy()
seed(random_state)
shuffle(data)
# inverse_index[i] maps shuffled position i back to the original index.
# NOTE(review): list.index returns the FIRST match, so duplicate documents
# collapse onto one original index, and each lookup is O(n) — confirm intended.
inverse_index = []
for i in range(len(data)):
    inverse_index.append(data2.index(data[i]))
if len(sys.argv) == 2: t = int(sys.argv[1]) elif len(sys.argv) > 2: print("error: should have one argument, as time step") else: t = 0 print("time: ", t) ##inite the fig of matplotlib fig = plt.figure(figsize=(10, 8)) fig.subplots_adjust(top=0.9, bottom=0.1, wspace=0.6, hspace=0.55) #============angle distribution======================================= val = collect("/Diagnostic/", "angle_distribution_right", itime=t) val1_1d = val[0, 0, :] val2_1d = val[0, 1, :] print(val.shape) nx = (val1_1d.shape)[0] x = np.linspace(0, nx, nx) ax0 = fig.add_subplot(2, 1, 1) ax0.yaxis.set_major_formatter(yformatter) line0 = ax0.plot(x, val1_1d, label='Electron', linestyle=linestyles[0]) line0 = ax0.plot(x, val2_1d, label=r'$\mathrm{D^+}$ ion',
clush_command.append('-f') clush_command.append(args.fault) if args.mean_runtime is not None: clush_command.append('-m') clush_command.append(str(args.mean_runtime)) if args.heap: clush_command.append('-x') if args.anon: clush_command.append('-y') if args.stack: clush_command.append('-z') if args.random_flip_rate: clush_command.append('-rfr') if args.suffix: clush_command.append('--suffix') clush_command.append(args.suffix) print(f'Running command: {" ".join(clush_command)}', flush=True) process = subprocess.Popen(clush_command) process.wait() path = os.path.join(args.working_directory, exp_name) collect(nodes=args.nodes, path=path, output=os.path.join(args.output_dir, f'{exp_name}_results.sqlite'))
def init_config_cache(force_refresh):
    """Populate the config cache from a freshly collected agent config."""
    collector = collect.collect()
    config = collector.get_config(force_refresh)
    set_config_cache(config)
def loop4():
    """Collect stats for the 4th of 14 team slices and pickle the results."""
    start = int((3 / 14) * len(teams))
    stop = int((4 / 14) * len(teams))
    for team in teams[start:stop]:
        data4[team] = collect(team)
    with open('sri/data4.pkl', 'wb') as handle:
        pickle.dump(data4, handle, protocol=pickle.HIGHEST_PROTOCOL)
def test_owners():
    """Every parsed function entry must expose an `owners` attribute."""
    for _name, log_entry in collect():
        assert all(hasattr(fn, "owners") for fn in log_entry.fns)
def main():
    """\
Script for producing a plot of the a probability density function from the CI
coefficients of a RAPTOR output (evb.out). This mimics the concept proposed in
the MS-EVB3 paper.

CHANGELOG

9-20-2013 DWS v1.0
Initial build. Currently the code outputs data to screen or plot it.
"""
    from argparse import ArgumentParser, RawDescriptionHelpFormatter
    from textwrap import dedent
    parser = ArgumentParser(description=dedent(main.__doc__),
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('--version', action='version', version='%(prog)s 1.0')
    # Bug fix: argparse hands back strings, and `nbins` is used below as an
    # integer (linspace count, `2 * nbins`).  `type=int` keeps the default
    # of 200 while converting user-supplied values correctly.
    parser.add_argument('-b', '--bins', help='Number of bins for generating plot.',
                        type=int, default=200)
    parser.add_argument(
        '-f', '--freeenergy', help='Use the probability density function '
        'to generate a free energy profile from the largest MS-EVB amplitude.',
        action='store_true', default=False)
    parser.add_argument(
        '-fd', '--freeenergydiff', help='Use the probability density function '
        'difference between the largest and second largest MS-EVB amplitude '
        'to generate a free energy profile.', action='store_true', default=False)
    parser.add_argument('files', help='The files that you want converted.',
                        nargs='+')
    args = parser.parse_args()

    # Store command line arguments as convenient variables
    fh = args.files[0]
    nbins = args.bins
    genfe = args.freeenergy
    genfed = args.freeenergydiff

    # Collect EVB simulation data
    evb_data = collect(fh)

    # Get the largest (first) and second largest (second) CI vector
    # from an MS-EVB simulation
    tmp = [i[1] for i in evb_data['CI_VECTOR']]
    first = array([])
    second = array([])
    for i in tmp:
        l = max(i)
        l2 = second_largest(i)
        # Note that we store the squared CI coefficient
        first = append(first, l * l)
        second = append(second, l2 * l2)

    # Generate probability densities. Here we use the kernel density estimator
    # to give a smooth PDF.
    pdf1 = gaussian_kde(first)
    #bin_pdf1 = linspace( min(first), max(first), nbins)
    bin_pdf1 = linspace(0, 1, nbins)
    pdf2 = gaussian_kde(second)
    #bin_pdf2 = linspace( min(second), max(second), nbins)
    bin_pdf2 = linspace(0, 1, nbins)
    if genfed:
        pdf3 = gaussian_kde(first - second)
        pdf4 = gaussian_kde(second - first)
        bin_pdf3 = linspace(-1, 1, 2 * nbins)

    # If requested, generate the free energy corresponding to the probability
    # density
    if genfe or genfed:
        free_energy = array([])
        kb = 1.3806488e-23 # J/K
        # 1 kcal/mol = 6.9477e-21 J
        kb_kcal = kb / 6.9477e-21
        if genfe:
            for i in bin_pdf1:
                val = -kb_kcal * 300.0 * log(pdf1(i))
                free_energy = append(free_energy, val)
        else:
            # Near zero amplitude-difference the KDE is unreliable, so the
            # window (-0.04, 0.04) is pinned to zero.
            for i in bin_pdf3:
                if i < -0.04:
                    val = -kb_kcal * 300.0 * log(pdf4(i))
                elif i > 0.04:
                    val = -kb_kcal * 300.0 * log(pdf3(i))
                else:
                    val = 0.0
                free_energy = append(free_energy, val)
        # Shift the zero point on the graph.
        minfe = min(free_energy)
        free_energy += -minfe

    # Plot the data
    plt.rc('text', usetex=True)
    plt.rc('font', **{'family': 'serif', 'serif': ['Times'], 'size': 30})
    fig = plt.figure()
    sub = fig.add_subplot(111)
    if genfe:
        sub.plot(bin_pdf1, free_energy, 'b', linewidth=3)
    elif genfed:
        sub.plot(bin_pdf3, free_energy, 'b', linewidth=3)
    else:
        sub.plot(bin_pdf1, pdf1(bin_pdf1), 'b', linewidth=3)
        sub.plot(bin_pdf2, pdf2(bin_pdf2), 'r', linewidth=3)

    # Adjust graph scale
    fig.subplots_adjust(left=0.12, right=0.90, bottom=0.1, top=0.9)

    # Title, labels
    if genfe:
        sub.set_xlabel(r'c$_1^2$')
        sub.set_ylabel(r'Free Energy (kcal/mol)')
    elif genfed:
        sub.set_xlabel(r'c$_1^2$-c$_2^2$')
        sub.set_ylabel(r'Free Energy (kcal/mol)')
    else:
        sub.set_xlabel(r'c$_i^2$')
        sub.set_ylabel(r'Probability Density')

    # Axis limits
    if genfe:
        sub.set_xlim([0.35, 0.85])
        sub.set_ylim([min(free_energy), 3])
    elif genfed:
        sub.set_xlim([-0.80, 0.80])
        sub.set_ylim([min(free_energy), 3])
    else:
        sub.set_xlim([0, 0.85])
        sub.set_ylim([0, 8])

    # Minor tick marks
    if genfed:
        xminor = MultipleLocator(0.05)
    else:
        xminor = MultipleLocator(0.01)
    yminor = MultipleLocator(0.1)
    sub.xaxis.set_minor_locator(xminor)
    sub.yaxis.set_minor_locator(yminor)

    plt.show()
# to schedule the next run nhours from now. PREFIX = "/tmp" SCHED = True def schedule(self, nminutes, eg, options): fileurl = "{}/{}".format(self.PREFIX, uuid.uuid4()) egauge_config.write_url((eg, options), fileurl, use_pickle=True) cmd_file = "{}/{}".format(self.PREFIX, uuid.uuid4()) with open(cmd_file, "wt") as fl: fl.write( schedule_script.format(eg.devurl.netloc, os.path.realpath(__file__).replace(".pyc", ".py"), fileurl) ) print "bash {}".format(cmd_file) if self.SCHED is True: # write command to a file from sh import at at("-f", cmd_file, "now+{}minutes".format(nminutes)) class SchedulerNull(SchedulerLocal): SCHED = False if __name__ == "__main__": eg, options = egauge_config.read_url(sys.argv[1], use_pickle=True) scheduler = getattr(imp.load_source("sched", options.scheduler_module), options.scheduler_class)() collect.collect(eg, options, scheduler)
line = str(point['Value']['StringWithMarkup'][0]['String']).lower() if 'decomp' not in line and \ 'greater' not in line and \ 'less' not in line and \ 'approx' not in line and \ '<' not in line and \ '>' not in line: # Get either a range of temperature or the measured temperature if '°c' in line: if re.match(r"\d*-\d+", line): ranges = re.findall(r"\d*-\d+", line)[0].split['-'] mp.append((float(ranges[0]) + float(ranges[1])) / 2) else: mp.append(float(re.findall(r"\d*", line)[0])) # Same as getting celsius just convert elif '°f' in line: if re.match(r"\d*-\d+", line): ranges = re.findall(r"\d*-\d+", line)[0].split['-'] mp.append((((float(ranges[0]) + float(ranges[1])) / 2) - 32) * (5 / 9)) else: mp.append((float(re.findall(r"\d*", line)[0]) - 32) * (5 / 9)) if len(mp) == 0: return None else: return sum(mp) / len(mp) # Takes list of molecules and outputs the smiles and corresponding melting point from melting_point function collect('data/melting_point/list.txt', 'mp.txt', MeltingPoint)
def main():
    """\
Script for reading XYZ files (*.xyz). This is designed to work with the
output of VMD solvation.

CHANGELOG

12-28-2013 DWS v1.3
Modified for generating the environment file

12-11-2013 DWS v1.2
Modified for rearranging molecules based on proximity

10-2-2013 DWS v1.1
Modified to work for CO3, HCO3, H2CO3

9-25-2013 DWS v1.0
Initial build.
"""
    from argparse import ArgumentParser, RawDescriptionHelpFormatter
    from textwrap import dedent
    parser = ArgumentParser(description=dedent(main.__doc__),
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('--version', action='version', version='%(prog)s 1.0')
    parser.add_argument('-m', '--molecules',
                        help='Molecules in the simulation, including . Read in '
                        'as "M1" "M2" ... list', nargs='+')
    parser.add_argument('-t', '--types',
                        help='Atom types in the simulation. Read in as '
                        '"# Atom" in a space separated list', nargs='+')
    parser.add_argument('-r', '--rearrange',
                        help='Rearrange atoms in XYZ file based on proximity to solute.',
                        action='store_true', default=False)
    parser.add_argument('-w', '--within',
                        help='Split the output based on the number of molecules within a '
                        'user defined distance to the solute.', required=False)
    parser.add_argument('-e', '--env',
                        help='Generate an env file for the QM region in a CP2K QMMM '
                        'calculation.', action='store_true', required=False)
    parser.add_argument('-ns', '--nsolute',
                        help='Number of atoms in the solute molecule.', required=False)
    parser.add_argument('files', help='The files that you want converted.', nargs='+')
    args = parser.parse_args()

    # Store command line arguments as convenient variables
    fh = args.files[0]
    molecules = args.molecules
    types = args.types
    rearrange = args.rearrange
    env = args.env
    # Bug fix: `-ns` is optional, so args.nsolute may be None and
    # int(None) raised TypeError whenever the flag was omitted.
    nsolute = int(args.nsolute) if args.nsolute is not None else None
    if args.within:
        within = float(args.within)
        # Also store the squared distance because we use that later.
        within2 = within*within
    else:
        within = None

    # Molecule data, including number of atoms, central atom, charges
    moldata = {
        'H2O' : { 'Natoms' : 3, 'Catom' : 'O', 'NBonds' : 2, 'NAngles' : 1,
                  'Mult_cent' : False, 'Atoms' : ['O', 'H', 'H'],
                  'Connect' : ['H', 'H'], 'OW' : -0.82, 'HW' : 0.41 },
        'H3O' : { 'Natoms' : 4, 'Catom' : 'O', 'NBonds' : 3, 'NAngles' : 3,
                  'Mult_cent' : False, 'Atoms' : ['O', 'H', 'H', 'H'],
                  'Connect' : ['H', 'H', 'H'], 'OH' : -0.50, 'HH' : 0.50 },
        'CO2' : { 'Natoms' : 3, 'Catom' : 'C', 'NBonds' : 2, 'NAngles' : 1,
                  'Mult_cent' : False, 'Atoms' : ['C', 'O', 'O'],
                  'Connect' : ['O', 'O'], 'O' : -0.3256, 'C' : 0.6512 },
        'CO3' : { 'Natoms' : 4, 'Catom' : 'C', 'NBonds' : 3, 'NAngles' : 3,
                  'Mult_cent' : False, 'Atoms' : ['C', 'O', 'O', 'O'],
                  'Connect' : ['O', 'O', 'O'], 'O' : -1.0000, 'C' : 1.0000 },
        'HCO3' : { 'Natoms' : 5, 'Catom' : 'C', 'NBonds' : 4, 'NAngles' : 4,
                   'Mult_cent' : True, 'Atoms' : ['C', 'O', 'O', 'O', 'H'],
                   'Connect' : ['O', 'O', 'O'],
                   'O' : -1.0000, 'C' : 1.0000, 'H' : 1.0000 },
        'H2CO3' : { 'Natoms' : 6, 'Catom' : 'C', 'NBonds' : 5, 'NAngles' : 5,
                    'Mult_cent' : True, 'Atoms' : ['C', 'O', 'O', 'O', 'H', 'H'],
                    'Connect' : ['O', 'O', 'O'],
                    'O' : -1.0000, 'C' : 1.0000, 'H' : 1.0000 },
    }
    # H2O parameters are from: jcp_124_024503
    # H3O parameters are from: jpcb_112_467
    # CO2 parameters are from: cjce_17_268
    # CO3 parameters are from: http://cinjweb.umdnj.edu/~kerrigje/data/par_all36_cgenff.prmi, I made up charges here
    # HCO3 parameters are made up
    # H2CO3 parameters are made up

    # Reformat the types ("# Atom" strings) into an element -> number map.
    if not rearrange and not env:
        atypes = {}
        for i in types:
            tmp = i.split()
            atypes[tmp[1]] = tmp[0]

    # /\/\/\/\/\/\/\/\
    # Collect XYZ file
    # /\/\/\/\/\/\/\/\

    xyz = collect(fh)

    if rearrange:
        # Number of atoms
        natoms = xyz['NATOMS']
        # Determine location of central atoms
        central = [[0,0]] # Index of central atom, number of atoms in molecule
        for i,atom in enumerate(xyz['ATOMS']):
            if i > 3:
                # Assumes the species is a carbonate.
                # Bug fix: was `atom is 'O'` — identity comparison against a
                # string literal is implementation-dependent; use equality.
                if atom == 'O':
                    central.append([i,3]) # Assumes that subsequent molecules are H2O
        nmol = len(central)
        central[0][1] = central[1][0] - central[0][0]

        # Determine distance between the central atom in solute and solvent
        # central atoms.
        # Note that this is actually the squared distance, to avoid a square root.
        distance = []
        for i,l in enumerate(central):
            if i == 0:
                continue
            else:
                j = l[0]
                d = ( ( xyz['COORDS'][j][0] - xyz['COORDS'][0][0] )**2
                    + ( xyz['COORDS'][j][1] - xyz['COORDS'][0][1] )**2
                    + ( xyz['COORDS'][j][2] - xyz['COORDS'][0][2] )**2 )
                distance.append(d)
        # Number of distances
        ndist = len(distance)

        # Connection of indices between the original coordinates and sorted coordinates
        indices = sort_coords(distance,nmol,ndist)

        # Sort coordinates
        satoms = [0.0 for i in range(natoms)]
        scoords = [0.0 for i in range(natoms)]
        for i in range(len(central)):
            m = indices[i][0]
            n = indices[i][1]
            j = central[n][0]
            k = j + central[n][1]
            p = central[m][0]
            for o in range(j,k):
                satoms[o] = xyz['ATOMS'][p]
                scoords[o] = xyz['COORDS'][p]
                p += 1

        # Sort the distances
        sdistance = sorted(distance)

        # Print values to file
        if within:
            fmt = '{0} {1:14.8f} {2:14.8f} {3:14.8f}'
            tmp = str(within).split('.')
            # Determine the number of atoms in each region
            nwithin = 1
            noutside = 0
            for dist in sdistance:
                if dist <= within2:
                    nwithin += 1
                else:
                    noutside += 1
            # NOTE(review): the 6 here looks like a hard-coded solute size —
            # possibly what `nsolute` was meant to parameterize; confirm.
            natin = 6 + 3*nwithin
            natout = natoms - natin
            # Check that the numbers of molecules in both regions is correct
            if nwithin + noutside != nmol:
                sys.exit('Number of atoms in each region is incorrect')
            # Name and open files
            fname1 = fh.split('.')[0] + '_leq' + tmp[0] + 'pt' + tmp[1] + '.xyz'
            fname2 = fh.split('.')[0] + '_gt' + tmp[0] + 'pt' + tmp[1] + '.xyz'
            f1 = open(fname1, 'w')
            f2 = open(fname2, 'w')
            # Write data to files
            print(natin, file=f1)
            print(natout, file=f2)
            print('', file=f1)
            print('', file=f2)
            for i in range(natin):
                print(fmt.format(satoms[i], scoords[i][0], scoords[i][1],
                                 scoords[i][2]), file=f1)
            for i in range(natin,natoms):
                print(fmt.format(satoms[i], scoords[i][0], scoords[i][1],
                                 scoords[i][2]), file=f2)
        else:
            fmt = '{0} {1:14.8f} {2:14.8f} {3:14.8f}'
            fname = 'newcoords.xyz'
            f = open(fname, 'w')
            print(natoms, file=f)
            print('', file=f)
            for i in range(natoms):
                print(fmt.format(satoms[i], scoords[i][0], scoords[i][1],
                                 scoords[i][2]), file=f)
    elif env:
        # /\/\/\/\/\/\/\/\/\/\/\/\/\
        # Write a CP2K QMMM env file
        # /\/\/\/\/\/\/\/\/\/\/\/\/\

        # Conversion table
        table = { 'H' : 'Hqm',
                  'C' : 'Cqm',
                  'O' : 'Oqm', }

        # Convert QM atoms to CP2K types
        cp2katoms = []
        for atom in xyz['ATOMS']:
            cp2katoms.append(table[atom])

        # Write the env file with the QM atom identification
        envname = fh.split('.')[0] + '.env'
        envfile = open(envname, 'w')
        for i in range(len(cp2katoms)):
            print('&QM_KIND ' + cp2katoms[i], file=envfile)
            print('MM_INDEX ' + str(i+1), file=envfile)
            print('&END QM_KIND', file=envfile)
    else:
        # /\/\/\/\/\/\/\/\/\/\/
        # Output formatted data
        # /\/\/\/\/\/\/\/\/\/\/

        # Atoms block
        print('Atoms')
        print()

        # Locate the central atom. This defines the start of a molecule
        dcentral ={}
        # Break down the element list
        newstart = 0
        for m in molecules:
            # Store the central atom locations
            dcentral[m] = []
            natoms = moldata[m]['Natoms']
            catom = moldata[m]['Catom']
            # Check if there are multiple "central" atoms
            mcent = moldata[m]['Mult_cent']
            for i in range(newstart,len(xyz['ATOMS']),natoms):
                c = xyz['ATOMS'][i]
                # For molecules with multiple centers, do this
                if mcent:
                    dcentral[m].append(i)
                    newstart = i + natoms
                    break
                else:
                    if c == catom:
                        keep = []
                        # Determine terminal atoms.
                        k = 0
                        for j in range(i+1,i+natoms):
                            if xyz['ATOMS'][j] == moldata[m]['Connect'][k]:
                                keep.append(True)
                            else:
                                keep.append(False)
                            k += 1
                        # Check if we have a molecule
                        if False in keep:
                            continue
                        else:
                            dcentral[m].append(i)
                            newstart = i + natoms
                            # Change atom types for easier sorting later
                            if m == 'H2O':
                                xyz['ATOMS'][i] = 'OW'
                                xyz['ATOMS'][i+1] = 'HW'
                                xyz['ATOMS'][i+2] = 'HW'
                            elif m == 'H3O':
                                xyz['ATOMS'][i] = 'OH'
                                xyz['ATOMS'][i+1] = 'HH'
                                xyz['ATOMS'][i+2] = 'HH'
                                xyz['ATOMS'][i+3] = 'HH'

        # Start printing data
        fmt = '{0:>4} {1:>4} {2:>2} {3:11.4e} {4:11.4e} {5:11.4e} {6:11.4e}'
        counter = 1   # Counter for atoms
        mnum = 1      # Counter for molecules
        morder = []   # Order of molecules
        ncentral = {} # New index of central atom.
        for m in molecules:
            morder.append(m)
            ncentral[m] = []
            for i in dcentral[m]:
                ln = []
                # Add counter for atoms
                ln.append(str(counter))
                # Append to the new counter
                ncentral[m].append(counter)
                # Add counter for molecules
                ln.append(str(mnum))
                # Add atomtype
                ln.append(atypes[xyz['ATOMS'][i]])
                # Add in atomic charge
                ln.append(moldata[m][xyz['ATOMS'][i]])
                # Add in coordinates
                ln.append(xyz['COORDS'][i][0])
                ln.append(xyz['COORDS'][i][1])
                ln.append(xyz['COORDS'][i][2])
                # Print the central atom
                print(fmt.format(ln[0], ln[1], ln[2], ln[3], ln[4], ln[5], ln[6]))
                # Repeat the process for the terminal atoms
                for j in range(i+1,i+moldata[m]['Natoms']):
                    counter += 1
                    ln = []
                    ln.append(str(counter))
                    ln.append(str(mnum))
                    ln.append(atypes[xyz['ATOMS'][j]])
                    ln.append(moldata[m][xyz['ATOMS'][j]])
                    ln.append(xyz['COORDS'][j][0])
                    ln.append(xyz['COORDS'][j][1])
                    ln.append(xyz['COORDS'][j][2])
                    print(fmt.format(ln[0], ln[1], ln[2], ln[3], ln[4], ln[5], ln[6]))
                # Change molecule number
                mnum += 1
                # Change atom number
                counter += 1

        # Bonds block
        # Note that I assume bond type 1 belongs to water.
        print()
        print('Bonds')
        print()
        fmt = '{0:>4} {1:>4} {2:>4} {3:>4}'
        bnum = 1
        for m in morder:
            if m == 'H2O':
                btype = 1
            else:
                btype = 2
            for i in ncentral[m]:
                ln = []
                ln.append(bnum)
                ln.append(btype)
                ln.append(i)
                # One bond line per central-terminal pair.
                for j in range(i+1,i+moldata[m]['Natoms']):
                    ln.append(j)
                    print(fmt.format(ln[0], ln[1], ln[2], ln[3]))
                    ln.pop()
                    bnum += 1
                    ln[0] = bnum

        # Angles block
        # Again, I assume that angle type 1 belongs to water
        print()
        print('Angles')
        print()
        # Keep in mind that the order of print out is terminal, central, terminal
        fmt = '{0:>4} {1:>4} {2:>4} {3:>4} {4:>4}'
        anum = 1
        # This part will probably need to be revised for molecules with more
        # than 3 atoms
        for m in morder:
            if m == 'H2O':
                a = 1
            else:
                a = 2
            for i in ncentral[m]:
                ln = [0,0,0,0,0]
                ln[0] = anum
                ln[1] = a
                ln[3] = i
                ln[2] = i+1
                ln[4] = i+2
                #t = 2
                #for j in range(i+1,i+moldata[m]['Natoms']):
                #    ln[t] = j
                #    t += 2
                # Not the smartest way to make this work, but it will do.
                # NOTE(review): ln[0] is not refreshed from anum inside this
                # loop, so multi-angle molecules repeat the same angle id —
                # confirm whether that is the intended output.
                nangles = moldata[m]['NAngles']
                for j in range(nangles):
                    print(fmt.format(ln[0], ln[1], ln[2], ln[3], ln[4]))
                    anum += 1
    def __init__(self, fresh=True, save=True, temperature=True, aperture=True, cpbs=True):
        """Connect to the beamline Tango devices and set up acquisition state.

        Only `fresh` and `save` are stored on the instance here; the other
        flags are accepted but not used in this constructor.
        """
        # Tango device proxies for the diffractometer, imaging and detector.
        self.md2 = PyTango.DeviceProxy('i11-ma-cx1/ex/md2')
        self.imag = PyTango.DeviceProxy('i11-ma-cx1/ex/imag.1')
        #self.lima = TangoLimaVideo.TangoLimaVideo()
        #self.lima.tangoname = 'lima/limaccd/1'
        self.lima = PyTango.DeviceProxy('lima/limaccd/1')
        self.md2bp = PyTango.DeviceProxy('i11-ma-cx1/ex/md2-beamposition')
        # One AttributeProxy per thermometer attribute.
        # NOTE(review): self.therm_attributes is a class attribute defined
        # outside this excerpt.
        self.thermometres = dict([(therm, PyTango.AttributeProxy(therm))
                                  for therm in self.therm_attributes])
        self.zooms = range(1, 11)
        self.collect = Collect.collect()
        # exposures more appropriate with lower flux: ~5% of transmission
        #self.exposures = {1: 0.021,
        #2: 0.021,
        #3: 0.021,
        #4: 0.021,
        #5: 0.021,
        #6: 0.021,
        #7: 0.021,
        #8: 0.021,
        #9: 0.021,
        #10: 0.021}
        # Per-zoom exposure times (seconds), keyed by zoom level 1-10.
        self.exposures = {1: 0.005,
                          2: 0.003,
                          3: 0.002,
                          4: 0.002,
                          5: 0.002,
                          6: 0.002,
                          7: 0.002,
                          8: 0.002,
                          9: 0.003,
                          10: 0.003}
        #Chip mode (without cryo)
        #self.exposures = {1: 0.01,
        #2: 0.01,
        #3: 0.01,
        #4: 0.01,
        #5: 0.02,
        #6: 0.02,
        #7: 0.02,
        #8: 0.05,
        #9: 0.05,
        #10: 0.05}
        #8 bunch
        #self.exposures = {1: 0.05,
        #2: 0.01,
        #3: 0.01,
        #4: 0.01,
        #5: 0.01,
        #6: 0.01,
        #7: 0.01,
        #8: 0.01,
        #9: 0.01,
        #10: 0.01}
        #1 bunch
        #self.exposures = {1: 0.21,
        #2: 0.1,
        #3: 0.1,
        #4: 0.1,
        #5: 0.1,
        #6: 0.1,
        #7: 0.2,
        #8: 0.2,
        #9: 0.25,
        #10: 0.25}
        #self.exposures = {1: 1,
        #2: 1,
        #3: 1,
        #4: 1,
        #5: 1,
        #6: 1,
        #7: 1,
        #8: 1,
        #9: 1,
        #10: 1}
        # Flags and result containers for a calibration run.
        self.fresh = fresh
        self.save = save
        self.images = {}
        self.cData = {}
        self.beamposition = {}
        self.infostore = {}
        self.timestamp = time.time()
import sys
import pickle
sys.path.append('C:/marcworking/pythonlib')
import numpy as np
import collect as coll

##### Deal with 1T1S, no anchorage
d1=coll.collect('data')
d1.import_cvs('original/UHPC_deck/s_1t1s_noanchorage.csv')
print d1.database.keys()
### use three strain gauges results to calculate the average middle span deflection
defl=(d1.database['S.Pot-2'][1]+d1.database['S.Pot-3'][1]+d1.database['S.Pot-1'][1])/3
d1.add_para('average_defl',[d1.database['S.Pot-2'][0],defl])
d1.export_pydat('Exp/s_1t1s_noanchorage')

##### Deal with 1T1S, steel end-plate anchorage
d2=coll.collect('data')
d2.import_cvs('original/UHPC_deck/s_1t1s_steelplate.csv')
print d2.database.keys()
# Average of the three mid-span potentiometers (note the '(mid)' channel name).
defl=(d2.database['S.Pot-2'][1]+d2.database['S.Pot-3 (mid)'][1]+d2.database['S.Pot-1'][1])/3
d2.add_para('average_defl',[d2.database['S.Pot-2'][0],defl])
d2.export_pydat('Exp/s_1t1s_steelplate')

##### Deal with 1T1S, hook anchorage (comment fixed: this is the hook
##### specimen, not a second end-plate one)
d3=coll.collect('data')
d3.import_cvs('original/UHPC_deck/s_1t1s_hook.csv')
print d3.database.keys()
defl=(d3.database['S.Pot-4'][1]+d3.database['S.Pot-3'][1]+d3.database['S.Pot-1'][1])/3
# NOTE(review): the time base here comes from S.Pot-1, while d1/d2 use
# S.Pot-2 — confirm this is intentional.
d3.add_para('average_defl',[d3.database['S.Pot-1'][0],defl])
    # (tail of a plotting helper whose definition precedes this excerpt)
    return plot


def rolling_average(data, count):
    """Return (point, mean of the last `count` values) for a (point, value) series.

    The window is seeded with zeros, so the first `count - 1` outputs are
    averaged against those zeros (warm-up artefact).
    """
    last = [0]*count
    output = []
    for point, value in data:
        # Newest value goes to the front; the oldest falls off the end.
        last.pop()
        last.insert(0, value)
        output.append((point, float(sum(last))/count))
    return output


def xy(data):
    # Split a list of (x, y) pairs into parallel x and y lists for matplotlib.
    return ([x[0] for x in data],[y[1] for y in data])


if __name__ == "__main__":
    # Per (where, who): bucket the activity into monthly bins, overlay a
    # 3-sample rolling average, and save one PNG per person.
    activity = collect()
    for where in activity:
        print where
        for who in activity[where]:
            print "  %s" % who
            print "    compress data"
            data = compress_data(activity[where][who],
                                 pytz.utc.localize(datetime(2002, 1, 1)),
                                 timedelta(days=31))
            print "    generate rolling average"
            rlavg = rolling_average(data, 3)
            print "    plot data"
            # NOTE(review): width is given as datetime.min + 30 days — check
            # this is the bar-width convention the matplotlib version expects.
            plt.bar(*xy(data), width=[datetime.min+timedelta(days=30)]*len(data))
            print "    plot rolling average"
            plt.plot(*xy(rlavg), color='red')
            filename = '%s-%s-2002-1m-3m-avg.png' % (where, who.encode('ascii','xmlcharrefreplace'))
            print "    save to %s" % filename
            plt.savefig(filename, transparent=True, bbox_inches='tight')
def main():
    """\
Script for reading a LAMMPS files. These can be output (*.out), log
(log.lammps, for instance), or data files.
"""
    from argparse import ArgumentParser, RawDescriptionHelpFormatter
    from textwrap import dedent
    parser = ArgumentParser(description=dedent(main.__doc__),
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('--version', action='version', version='%(prog)s 1.0')
    parser.add_argument('-o', '--output', help='Output the data to a table.',
                        action='store_true', default=False)
    parser.add_argument('-p', '--plot', help='Plot the data.',
                        action='store_true', default=False)
    parser.add_argument('-a', '--atomtypes', help='The atom type numbers you want converted.'
                        ' These should be written in "# element" format.', nargs='+')
    parser.add_argument('-k', '--cp2k', help='Create coordinate blocks for CP2K.',
                        action='store_true', default=False)
    parser.add_argument('files', help='The files that you want converted.', nargs='+')
    args = parser.parse_args()

    # Store command line arguments as convenient variables
    fh = args.files[0]
    output = args.output
    plot = args.plot
    atable = args.atomtypes
    cp2k = args.cp2k

    if output is False and plot is False:
        msg = "Please request output or plotting. Use -h to see what is available"
        error = '\n' + len(msg)*'%' + '\n' + msg + '\n' + len(msg)*'%' + '\n'
        sys.exit(error)

    # Convert the table to a dictionary
    if cp2k:
        convert = {}
        for item in atable:
            item = item.split()
            convert[item[0]] = item[1]

    # Collect all data into memory for data retention
    thermodata = collect(fh)

    # Strings to search for
    table = {'Define variables' : 'VARIABLES',
             'Define units' : 'UNITS',
             'Define LJ potential input' : 'LJPOTENTIAL',
             'Define Potential Types' : 'POTENTIALTYPES',
             'Define Electrostatics' : 'ELECTROSTATICS',
             'Read Data for System' : 'READDATA',
             'Coefficients for Bond Potentials' : 'BONDS',
             'Coefficients for Angle Potentials' : 'ANGLES',
             'Coefficients for LJ Potential' : 'LJCOEFF',
             'Parameters for Building Pairwise Neighbor Lists' : 'NEIGHBORS',
             'Define Timestep Size for MD' : 'TIMESTEP',
             'Define the Thermodynamics Output' : 'THERMOUT',
             'Define Operations Applied to the System' : 'FIXES',
             'Define Extra Output' : 'DUMPS',
             'Restart File Information' : 'RESTART',
             'Run Dynamics' : 'RUNDYN',
             'End of Job' : 'END'}

    # Determine which units were used. This is important for correct output.
    # NOTE(review): units_table is never consulted in this function —
    # possibly `units = units_table[thermodata['UNITS']]` was intended below;
    # left untouched pending confirmation.
    units_table = {'REAL' : { 'Mass' : r'g/mol',
                              'Distance' : r'\AA',
                              'Time' : r'fs',
                              'Energy' : r'kcal/mol',
                              'Velocity' : r'\AA/fs',
                              'Force' : r'kcal/(mol*\AA)',
                              'Torque' : r'kcal/mol',
                              'Temperature' : r'K',
                              'Pressure' : r'atm',
                              'Dynamic Viscosity' : r'Poise',
                              'Charge' : r'e',
                              'Dipole' : r'e*\AA',
                              'Electric Field' : r'V/\AA',
                              'Density' : r'g/cm$^3$' },
                   'METAL' : { 'Mass' : r'g/mol',
                               'Distance' : r'\AA',
                               'Time' : r'ps',
                               'Energy' : r'eV',
                               'Velocity' : r'\AA/ps',
                               'Force' : r'eV/\AA',
                               'Torque' : r'eV',
                               'Temperature' : r'K',
                               'Pressure' : r'bars',
                               'Dynamic Viscosity' : r'Poise',
                               'Charge' : r'e',
                               'Dipole' : r'e*\AA',
                               'Electric Field' : r'V/\AA',
                               'Density' : r'g/cm$^3$' },
                   'SI' : { 'Mass' : r'kg',
                            'Distance' : r'm',
                            'Time' : r's',
                            'Energy' : r'J',
                            'Velocity' : r'm/s',
                            'Force' : r'N',
                            'Torque' : r'N*m',
                            'Temperature' : r'K',
                            'Pressure' : r'Pa',
                            'Dynamic Viscosity' : r'Pa*s',
                            'Charge' : r'C',
                            'Dipole' : r'C*m',
                            'Electric Field' : r'V/m',
                            'Density' : r'kg/cm$^3$' },
                   'CGS' : { 'Mass' : r'g',
                             'Distance' : r'cm',
                             'Time' : r's',
                             'Energy' : r'ergs',
                             'Velocity' : r'cm/s',
                             'Force' : r'dynes',
                             'Torque' : r'dyne*cm',
                             'Temperature' : r'K',
                             'Pressure' : r'dyne/cm^2',
                             'Dynamic Viscosity' : r'Poise',
                             'Charge' : r'esu',
                             'Dipole' : r'esu*cm',
                             'Electric Field' : r'dyne/esu',
                             'Density' : r'g/cm$^3$' },
                   'ELECTRON' : { 'Mass' : r'amu',
                                  'Distance' : r'Bohr',
                                  'Time' : r'fs',
                                  'Energy' : r'Hartrees',
                                  'Velocity' : r'Bohr/(atomic time unit)',
                                  'Force' : r'Hartree/Bohr',
                                  'Temperature' : r'K',
                                  'Pressure' : r'Pa',
                                  'Charge' : r'e',
                                  'Dipole' : r'Debye',
                                  'Electric Field' : r'V/cm', },
                   }

    # Table of quantities for performing averages
    comp_table = ( 'Temp',
                   'TotEng',
                   'PotEng', )

    # Output some information to screen
    if output:
        if thermodata['FILETYPE'] == 'LAMMPS Logfile':
            # Define the units
            units = thermodata['UNITS']
            # thermotable converts between the keys in thermodata to column headings.
            thermotable = { 'Step' : 'Time (' + units['Time'] + ')',
                            'Temp' : 'Temperature (' + units['Temperature'] + ')',
                            'PotEng' : 'Potential Energy (' + units['Energy'] + ')',
                            'TotEng' : 'Total Energy (' + units['Energy'] + ')', }
            # Generate the column heading format and data format.
            # Bug fix: the original indexed the dict view
            # (`thermodata.keys()[i]`), which raises TypeError on Python 3;
            # iterate over the keys directly instead.
            fmt = '|{0:20}|'
            datafmt = '|{0:20}|'
            count = 1
            for key in thermodata.keys():
                if key == 'Step':
                    continue
                elif key in comp_table:
                    fmt += '{' + str(count) + ':^30}|'
                    datafmt += '{' + str(count) + ':^30.3f}|'
                    count += 1
            # Generate the column headings
            head = ['']
            avg = ['Average']
            stdev = ['Standard Deviation']
            for key in thermodata.keys():
                if key in comp_table:
                    head.append(thermotable[key])
                    avg.append(thermodata[key][2])
                    stdev.append(thermodata[key][3])
            print()
            print('='*len(fmt.format(*head)))
            print(fmt.format(*head))
            print('='*len(fmt.format(*head)))
            print(datafmt.format(*avg))
            print(datafmt.format(*stdev))
            print('='*len(fmt.format(*head)))
            print()
        elif thermodata['FILETYPE'] == 'LAMMPS Datafile':
            coords = []
            if 'Velocities' in thermodata.keys():
                velocities = []
            natoms = thermodata['Num_atoms']
            atoms = []
            # NOTE(review): `convert` is only defined when -k/--cp2k is
            # given; this branch assumes that flag was passed.
            for i in range(natoms):
                ix = str(i+1)
                atoms.append(convert[str(thermodata['Coords'][ix][0])])
                coords.append([ thermodata['Coords'][ix][1],
                                thermodata['Coords'][ix][2],
                                thermodata['Coords'][ix][3] ])
                if 'Velocities' in thermodata.keys():
                    velocities.append([ thermodata['Velocities'][ix][0],
                                        thermodata['Velocities'][ix][1],
                                        thermodata['Velocities'][ix][2] ])
            # Print coordinates
            print('&COORD')
            lnstyle = ' {0:2} {1:13.8f} {2:13.8f} {3:13.8f}'
            for i in range(natoms):
                print(lnstyle.format(atoms[i], coords[i][0], coords[i][1], coords[i][2]))
            print('&END COORD')
            # Print velocities
            lenconv = 0.5291772 # Angstrom / Bohr
            timeconv = 2.418884326505e-2 # fs / a.t.u
            conv = timeconv / lenconv
            if 'Velocities' in thermodata.keys():
                # Velocity units are by default atomic units. We need
                # to convert from values in LAMMPS as a result for CP2K.
                print('&VELOCITY')
                lnstyle = ' {0:13.8f} {1:13.8f} {2:13.8f}'
                for i in range(natoms):
                    print(lnstyle.format(conv*velocities[i][0],
                                         conv*velocities[i][1],
                                         conv*velocities[i][2]))
                print('&END VELOCITY')
def takeoff ( tel, yr, mn, dy, run=None, day=None, obs_begin=None, obs_end=None,
              moon_dis_limit=50.0, airmass_limit=1.75, ha_limit=4.0,
              overwrite=False, simulate=False, check=False ) :
    """ Generate the observation script for one night.

    Args:
        tel: telescope code (also the root directory of that telescope's data)
        yr: year of obs date, 4-digit year
        mn: month of obs date, 1 to 12
        dy: day of obs date, 0 to 31, or extended
        run: run code, default is `yyyymm`
        day: day code, default is `Jxxxx` (MJD-based)
        obs_begin: obs begin hour, float; default is evening twilight end
            (sun at `twi_alt` below horizon — docstring originally said
            "1.25 hours after sunset"; the code actually uses twilight)
        obs_end: obs end hour, float; default is morning twilight begin
        moon_dis_limit: minimum distance of a good field to the moon, default 50 deg
        airmass_limit: limit of airmass, default 1.75; should be greater for pole area
        ha_limit: limit of hour angle, default 4.0; should be greater for pole area
        overwrite: bool, when output dir already exists, overwrite or not
        simulate: bool, also generate a simulated check/obsed list or not
        check: bool, if true, write a per-block selection report and map
    """
    # all vars starting with `rep_` contain report info
    rep_start_time = Time.now()

    # sanity check: telescope directory and its basic config must exist
    if not os.path.isdir(tel) or not os.path.isfile(tel+"/conf/basic.txt") :
        tea(None, common.msg_box().box(
            ["Telescope `{tel}` does NOT EXIST!!".format(tel=tel)],
            title="ERROR", border="*"))
        return

    # load site and telescope basic data
    site = schdutil.load_basic(tel)
    # airmass lower limit, set this to avoid zenith
    airmass_lbound = 1.005  # about 84 deg
    twi_alt = -15.0  # twilight altitude of sun

    # night parameters
    # mjd of 18:00 of site timezone, as code of tonight
    mjd18 = common.sky.mjd_of_night(yr, mn, dy, site)
    # mjd of local midnight, as calculate center
    mjd24 = common.sky.mjd(yr, mn, dy, 24 - site.lon / 15.0, 0, 0, 0)
    # local sidereal time of midnight
    lst24 = common.sky.lst(mjd24, site.lon)
    # timezone correction: between local time and timezone standard time
    tzcorr = site.tz - site.lon / 15.0

    # observation start and end time, in timezone time
    sunset_mjd, sunrise_mjd = common.sky.sun_action (mjd24, lst24, site.lat, 0.0)
    twi_begin_mjd, twi_end_mjd = common.sky.sun_action (mjd24, lst24, site.lat, twi_alt)
    sunset_hour = common.sky.mjd2hour(sunset_mjd, site.tz)
    # morning hours are shifted by +24 so the night is a monotonic hour axis
    sunrise_hour = common.sky.mjd2hour(sunrise_mjd, site.tz) + 24.0
    twi_begin_hour = common.sky.mjd2hour(twi_begin_mjd, site.tz)
    twi_end_hour = common.sky.mjd2hour(twi_end_mjd, site.tz) + 24.0

    # if obs time is given, use given time; hours before noon are pushed to next day
    if obs_begin is None or np.isnan(obs_begin) :
        obs_begin = twi_begin_hour
    else :
        if obs_begin < 12.0 : obs_begin += 24
    if obs_end is None or np.isnan(obs_end) :
        obs_end = twi_end_hour
    else :
        if obs_end < 12.0 : obs_end += 24

    # moon position at midnight, as mean coord to calculate moon-object distance
    mpos = common.sky.moon_pos(mjd24)  #astropy.coordinates.get_moon(tmjd24)
    mphase = common.sky.moon_phase(mjd24)
    # sun position at midnight
    spos = common.sky.sun_pos(mjd24)  #astropy.coordinates.get_sun(tmjd24)

    ######################################################################################
    # default run name rule
    if run is None or run == "" :
        run = "{year:04d}{month:02d}".format(year=yr, month=mn)
    if day is None or day == "" :
        day = "J{mjd:0>4d}".format(mjd=mjd18)
    daystr = "{year:04d}.{month:02d}.{day:02d}".format(year=yr, month=mn, day=dy)

    # schedule dir
    daypath = "{tel}/schedule/{run}/{day}/".format(tel=tel, run=run, day=day)
    if os.path.isdir(daypath) :
        if not overwrite :
            tea(None, common.msg_box().box(
                ["Schedule dir already exists.",
                 "If you want to overwrite, please set `overwrite=True`"],
                title="ERROR", border="*"))
            return
    os.system("mkdir -p " + daypath)
    if not os.path.isdir(daypath) :
        # NOTE(review): unlike the branches above, this error path does NOT
        # return, so the open(rep_fn) below would fail — confirm intent.
        # Also a bare string (not a list) is passed to msg_box().box here,
        # inconsistent with the other calls — verify msg_box accepts both.
        tea(None, common.msg_box().box(
            "Can NOT make schedule dir `{}`".format(daypath),
            title="ERROR", border="*"))

    ######################################################################################
    # output filename or filename format
    rep_fn  = "{path}report.{mjd:04}.{days}.txt" .format(path=daypath, mjd=mjd18, days=daystr)
    sumb_fn = "{path}sumblock.{mjd:04}.{days}.txt".format(path=daypath, mjd=mjd18, days=daystr)
    sumf_fn = "{path}sumfield.{mjd:04}.{days}.txt".format(path=daypath, mjd=mjd18, days=daystr)
    plan_fn = "{path}plan.{mjd:04}.{days}.txt" .format(path=daypath, mjd=mjd18, days=daystr)
    eps_fn  = "{path}plan.{mjd:04}.{days}.eps" .format(path=daypath, mjd=mjd18, days=daystr)
    png_fn  = "{path}plan.{mjd:04}.{days}.png" .format(path=daypath, mjd=mjd18, days=daystr)
    # bound .format methods: called later with keyword args per block
    chk_fn_fmt = "{path}chk.{mjd:04}.{sn:02}.{bname}.txt".format
    see_fn_fmt = "{path}see.{mjd:04}.{sn:02}.{bname}.png".format
    scr_fn_fmt = "{path}scr.{mjd:04}.{sn:02}.{bname}.txt".format

    rep_f = open(rep_fn, "w")
    tea(rep_f, " --------======== Start : {} ========--------\n".format(rep_start_time.iso))

    # load fields and plan
    plans = schdutil.load_expplan(tel)
    fields = schdutil.load_field(tel)
    active_plans = {p:plans[p] for p in plans if plans[p].active}

    # find all obsed file, and mark them
    skipfile = "{tel}/obsed/skip.lst".format(tel=tel)
    obsedlist = schdutil.ls_files("{tel}/obsed/*/obsed.J*.lst".format(tel=tel))
    schdutil.load_obsed(fields, obsedlist, plans, skipfile=skipfile)

    afields = np.array(list(fields.values()))
    ara = np.array([f.ra for f in afields])
    ade = np.array([f.de for f in afields])

    # mark fields near moon and sun (tag bit 0x10 = excluded by moon/sun)
    moon_dis = common.sky.distance(mpos.ra, mpos.dec, ara, ade)
    for f in afields[np.where(moon_dis < moon_dis_limit)]:
        f.tag |= 0x10
    sun_dis = common.sky.distance(spos.ra, spos.dec, ara, ade)
    for f in afields[np.where(sun_dis < 60)]:
        f.tag |= 0x10
    atag = np.array([f.tag for f in afields])
    # keep only unfinished fields (tag 0x00/0x01), and must be away from moon
    newfield = afields[np.where(atag <= 1)]

    # count histogram of tag states for the report
    n_tag = len(afields)
    n_tag_01 = sum((atag == 0x00) | (atag == 0x01))
    n_tag_2  = sum((atag == 0x02) | (atag == 0x12))
    n_tag_10 = sum((atag == 0x10) | (atag == 0x11))
    n_tag_1f = sum((atag == 0x1F))

    # blocks and unique blocks
    newfieldblock = np.array([f.bk for f in newfield])
    newblockset = set(newfieldblock)
    n_block = len(newblockset)
    # block parameter: block name -> block_info over its member fields
    newblock = {}
    for b in newblockset :
        f_in_b = newfield[np.where(newfieldblock == b)]
        newblock[b] = schdutil.block_info(b, f_in_b)

    # show prepare message
    tea(rep_f, common.msg_box().box([
        "## {tel}, on {days} (J{mjd:04}) of run `{run}`".
            format(tel=tel,days=daystr,mjd=mjd18, run=run),
        "Sun set at {s:5}, rise at {r:5}, obs time is {os:5} ==> {oe:5}".
            format( s=common.angle.hour2str(sunset_hour),
                    r=common.angle.hour2str(sunrise_hour-24.0),
                    os=common.angle.hour2str(obs_begin),
                    oe=common.angle.hour2str(obs_end-24.0)),
        "Obs hours is {ol:5}, LST of midnight is {mst:5}".
            format(ol=common.angle.hour2str(obs_end-obs_begin),
                   mst=common.angle.hour2str(lst24)),
        "Moon mean position is {ra:11} {de:11}, phase is {ph:4.1%}".
            format(ra=common.angle.dec2hms(mpos.ra),
                   de=common.angle.dec2dms(mpos.dec), ph=mphase),
        ("Simulation included" if simulate else "No simulation"),],
        title="Night General Info", align="^<<<>"))
    tea(rep_f, common.msg_box().box([
        "{:<20} {:>5} | {:<20} {:>5}".format("All Fields", n_tag, "X: Skipped", n_tag_1f),
        "{:<20} {:>5} | {:<20} {:>5}".format("X: Finished", n_tag_2, "X: Near Moon/Sun", n_tag_10),
        "{:<20} {:>5} | {:<20} {:>5}".format("Available Fields", n_tag_01, "Available Blocks", n_block)
        ], title="Fields Count", align="^^"))

    ######################################################################################
    # start to make schedule
    clock_now = obs_begin
    # lst of start, use lst_clock(clock_now) to call this
    lst_clock = lambda c : (lst24 + c - tzcorr) % 24.0
    tea(rep_f, "Begin to schedule from {clock}, LST {lst}\n".format(
        clock=common.angle.hour2str(clock_now),
        lst=common.angle.hour2str(lst_clock(clock_now))))

    # define a lambda rank function (argsort of argsort = rank of each element)
    rank = lambda aa : aa.argsort().argsort()

    # simulation working
    if simulate :
        simu_path = "{tel}/obsed/{run}/".format(tel=tel, run=run)
        os.system("mkdir -p " + simu_path)
        simu_check_fn = simu_path + "check.J{mjd:04d}.lst".format(mjd=mjd18)
        sim_f = open(simu_check_fn, "w")
        tea(rep_f, "Simulation file: " + simu_check_fn)
    else :
        sim_f = None

    # format of output (bound .format methods again)
    rep_tit = "{clock:5} [{lst:^5}] {sn:2} | {bn:^7} ({ra:^9} {de:^9}) | {airm:4} {az:>5} {alt:>5} | {btime:>5}".format(
        sn="No", bn="Block", ra="RA", de="Dec", airm="Airm",
        clock="Time", lst="LST", az="Az", alt="Alt", btime="Cost")
    rep_fmt = "{clock:5} [{lst:5}] #{sn:02} | {bn:7} ({ra:9.5f} {de:+9.5f}) | {airm:4.2f} {az:5.1f} {alt:+5.1f} | {btime:>4d}s".format
    rep_war = "**: {skip:>7} minutes SKIPPED !! {skipbegin:5} ==> {clock:5} [{lst:5}]".format
    sum_tit = "#{mjd:3} {clock:5} {lst:^5} {sn:>2} {bn:^7} {ra:^9} {de:^9} {airm:4} {az:>5} {alt:>5} {btime:>4}\n".format(
        mjd="MJD", clock="Time", lst="LST", sn="No", bn="Object", ra="RA", de="Dec",
        airm="Airm", az="Az", alt="Alt", btime="Cost")
    sum_fmt = "{mjd:04d} {clock:5.2f} {lst:5.2f} {sn:2d} {bn:7} {ra:9.5f} {de:+9.5f} {airm:4.2f} {az:5.1f} {alt:+5.1f} {btime:>4d}\n".format
    chk_fmt = "{ord:03d} {bn:7s} ({ra:9.5f} {de:+9.5f}) {airm:4.2f} {ha:5.2f} {az:5.1f} {alt:+5.1f} {key:>5.1f} {other}\n".format
    chk_tit = "#{ord:>2} {bn:^7} ({ra:>9} {de:>9}) {airm:>4} {ha:5} {az:>5} {alt:>5} {key:>5} {other}\n".format(
        ord="No",bn="Block", ra="RA", de="Dec", airm="Airm", ha="HA",
        az="Az", alt="Alt", key="Key", other="Other")
    scr_fmt = (site.fmt + "\n").format

    tea(rep_f, rep_tit)
    sumb_f = open(sumb_fn, "w")
    sumf_f = open(sumf_fn, "w")
    plan_f = open(plan_fn, "w")
    sumb_f.write(sum_tit)
    sumf_f.write(sum_tit)

    # init before loop
    block_sn = 0            # block sn, count blocks, and also for output
    skip_begin = None       # time begin to skip, when no good block available
    span_skip = 1.0 / 60.0  # how long skipped each loop
    skip_count = 0
    skip_total = 0.0
    exp_airmass = []        # collect airmass
    lra, lde = float("nan"), float("nan")  # last field ra, dec
    njump = 0
    keyweight = [1, 1, 1]   # NOTE(review): only used by the commented-out key formula
    # time need of a full round for a field (hours)
    plan_time = sum([(plans[p].expt + site.inter) * plans[p].repeat for p in active_plans]) / 3600.0

    ######################################################################################
    while clock_now < obs_end :
        lst_now = lst_clock(clock_now)
        if len(newblock) == 0 :
            # no available block, print error message, and end procedure
            skip_begin = clock_now
            break

        bra   = np.array([b.ra for b in newblock.values()])
        bde   = np.array([b.de for b in newblock.values()])
        bsize = np.array([len(b.fields) for b in newblock.values()])
        bname = np.array(list(newblock.keys()))
        # assume all fields need a full round, this is estimated center lst
        blst = lst_now + plan_time * bsize / 2.0
        # calculate airmass for all available block
        ha = common.angle.angle_dis(blst * 15.0, bra) / 15.0
        airm = common.sky.airmass(site.lat, blst, bra, bde)
        baz, balt = common.sky.azalt(site.lat, blst, bra, bde)

        # keep blocks with airm < airmlimit & hour angle < ha_limit,
        # and then dec < min dec + 4 deg
        ix_1 = np.where((airm < airmass_limit) & (airm > airmass_lbound) & (np.abs(ha) < ha_limit))

        # no available block handler
        if len(ix_1[0]) == 0 :
            # no good block, have bad block
            # maybe not good time, skip 5 min and check again
            # set skip mark, if found new block, print warning for skipped time
            if skip_begin is None :
                skip_begin = clock_now
            clock_now += span_skip
            continue
        elif skip_begin is not None :
            # found good block, but before this, some time skipped, print a warning
            tea(rep_f, rep_war(
                skip=int((clock_now - skip_begin) * 60),
                skipbegin=common.angle.hour2str(skip_begin),
                clock=common.angle.hour2str(clock_now),
                lst=common.angle.hour2str(lst_now) ))
            # NOTE(review): sum_fmt formats clock/lst with ":5.2f" but is given
            # hour2str() strings here — this looks like it would raise; verify.
            sumf_f.write(sum_fmt(mjd=mjd18, clock=common.angle.hour2str(skip_begin),
                sn=0, bn="SKIP!!!", ra=0.0, de=0.0, airm=0.0, az=0.0, alt=0.0,
                lst=common.angle.hour2str(lst_now),
                btime=int(int((clock_now - skip_begin) * 3600)) ))
            skip_count += 1
            skip_total += clock_now - skip_begin
            skip_begin = None

        # add 2nd condition, dec <= min dec + 4
        ix_2 = np.where((airm < airmass_limit) & (airm > airmass_lbound) &
                        (np.abs(ha) < ha_limit) & (bde <= bde[ix_1].min() + 4.0))
        # make key for each block, key = airmass rank + ra rank + de rank
        airm_2, ha_2 = airm[ix_2], ha[ix_2]
        baz_2, balt_2 = baz[ix_2], balt[ix_2]
        bra_2, bde_2, bname_2 = bra[ix_2], bde[ix_2], bname[ix_2]

        # added 20160903, distance to previous point, make move smaller
        # forward has more priority (eastward moves weighted half of westward)
        if not np.isnan(lra) :
            bdis_2 = bde_2 - lde
            ix_e = np.where(bra_2 >= lra)
            ix_w = np.where(bra_2 < lra)
            bdis_2[ix_e] += 0.5 * (bra_2[ix_e] - lra)
            bdis_2[ix_w] += 1.0 * (lra - bra_2[ix_w])
            rdis_2 = rank(bdis_2)
            #rdis_2 = 0
        else :
            rdis_2 = 0

        # key formular is MOST important
        ############################################################
        key_2 = rank(airm_2) + rank(-ha_2) + rank(bde_2) + rdis_2
        #key_2 = stdscore(airm_2) * keyweight[0] + stdscore(-ha_2) * keyweight[1] + stdscore(bde_2) * keyweight[2]
        ############################################################

        # choose the best block (lowest key)
        ix_best = key_2.argmin()
        bname_best = bname_2[ix_best]
        block_best = newblock[bname_best]
        # inc sn
        block_sn += 1

        # mark candidate blocks, just for check plot
        for b in bname_2 :
            for f in newblock[b].fields :
                f.tag |= 0x04
        # mark fields in selected block
        for f in block_best.fields :
            f.tag = 0x07  # use this code, when clean candidate, it will be back to 3

        # script file, using format from configuration
        jumped = False
        scr_fn = scr_fn_fmt(path=daypath, mjd=mjd18, sn=block_sn, bname=bname_best)
        block_time = 0  # time cost for this block, in seconds
        with open(scr_fn, "w") as scr_f :
            # script: plan loop, field loop, do only factor < 1
            for p in active_plans :
                for f in block_best.fields :
                    factor_work = 1.0 - f.factor[p]
                    nrepeat = int(np.ceil(factor_work / plans[p].factor))
                    for i in range(nrepeat) :
                        # output to script and simulate file
                        if max(abs(common.angle.angle_dis(lra, f.ra, 1.0/np.cos(np.deg2rad(lde)))),
                               abs(lde - f.de)) > site.bmove :
                            plan_f.write("\n")  # a mark about big move, for bok not for xao
                            njump += 1
                            jumped = True
                        scr = scr_fmt(e=schdutil.exposure_info.make(plans[p], f))
                        scr_f.write(scr)
                        plan_f.write(scr)
                        if simulate :
                            sim_f.write("{}\n".format(schdutil.check_info.simulate(plans[p], f)))
                        # summary file
                        clock_field = clock_now + block_time / 3600.0
                        fairm = f.airmass(site.lat, lst_clock(clock_field))
                        faz, falt = f.azalt(site.lat, lst_clock(clock_field))
                        exp_airmass.append(fairm)
                        sumf_f.write(sum_fmt(mjd=mjd18, clock=clock_field, sn=block_sn,
                            bn=f.id, ra=f.ra, de=f.de, airm=fairm, az=faz, alt=falt,
                            lst=lst_clock(clock_field),
                            btime=int(plans[p].expt + site.inter) ))
                        lra, lde = f.ra, f.de
                        # time walking
                        block_time += plans[p].expt + site.inter
        #plan_f.write("\n")  # write a blank line to seperate blocks

        # generate a check file about selection
        if check :
            # check file, list blocks: name, ra, dec, fields, airmass, rank
            chk_fn = chk_fn_fmt(path=daypath, mjd=mjd18, sn=block_sn, bname=bname_best)
            with open(chk_fn, "w") as chk_f :
                chk_f.write(chk_tit)
                i = 0
                for s in key_2.argsort() :
                    i += 1
                    b = newblock[bname_2[s]]
                    chk_f.write(chk_fmt(ord=i, key=key_2[s], bn=b.bname, ra=b.ra, de=b.de,
                        az=baz_2[s], alt=balt_2[s], airm=airm_2[s], ha=ha_2[s], other="*"))
                # for b in range(len(newblock)) :
                #     chk_f.write(chk_fmt(ord=0, bn=bname[b], ra=bra[b], de=bde[b],
                #         airm=airm[b], ha=ha[b], key=0.0, other="*" if b in ix_2[0] else " "))
            # plot a check map
            maptitle = "{tel} {day} {now} {jump}".format(
                tel=tel, day=daystr, now=common.angle.hour2str(clock_now),
                jump=("*" if jumped else ""))
            plotmap.plotmap(ara, ade, np.array([f.tag for f in afields]), title=maptitle,
                pngfile=see_fn_fmt(path=daypath, mjd=mjd18, sn=block_sn, bname=bname_best),
                mpos=(mpos.ra, mpos.dec), spos=(spos.ra, spos.dec),
                zenith=(lst_now * 15.0, site.lat))

        # clear candidate marks (keep low 2 bits only)
        for b in bname_2 :
            for f in newblock[b].fields :
                f.tag &= 0x03

        # report & summary
        tea(rep_f, rep_fmt(
            sn=block_sn, bn=bname_best, ra=block_best.ra, de=block_best.de,
            airm=airm_2[ix_best], az=baz_2[ix_best], alt=balt_2[ix_best],
            clock=common.angle.hour2str(clock_now),
            lst=common.angle.hour2str(lst_now), btime=int(block_time) ))
        sumb_f.write(sum_fmt(mjd=mjd18, clock=clock_now, sn=block_sn, bn=bname_best,
            ra=block_best.ra, de=block_best.de,
            airm=airm_2[ix_best], az=baz_2[ix_best], alt=balt_2[ix_best],
            lst=lst_now, btime=int(block_time) ))

        # remove used block from newblock
        del newblock[bname_best]
        # clock forward
        clock_now += block_time / 3600.0

    # handle event: in last time no block available
    if skip_begin is not None :
        # found good block, but before this, some time skipped, print a warning
        tea(rep_f, rep_war(
            skip=int((clock_now - skip_begin) * 60),
            skipbegin=common.angle.hour2str(skip_begin),
            clock=common.angle.hour2str(clock_now),
            lst=common.angle.hour2str(lst_clock(clock_now)) ))
        # NOTE(review): same string-into-":5.2f" concern as the in-loop skip record.
        sumf_f.write(sum_fmt(mjd=mjd18, clock=common.angle.hour2str(skip_begin),
            sn=0, bn="SKIP!!!", ra=0.0, de=0.0, airm=0.0, az=0.0, alt=0.0,
            lst=common.angle.hour2str(lst_clock(clock_now)),
            btime=int(int((clock_now - skip_begin) * 3600)) ))
        skip_count += 1
        skip_total += clock_now - skip_begin
        skip_begin = None

    ######################################################################################
    sumb_f.close()
    sumf_f.close()
    plan_f.close()
    if simulate:
        sim_f.close()
    print ("")
    os.system("unix2dos {}".format(plan_fn))
    tea(rep_f, "")

    # total of schedule
    tea(rep_f, common.msg_box().box([
        "Total {b} blocks, {e} exposures, {t} cost. From {s} to {f}".format(
            b=block_sn, e=len(exp_airmass),
            t=common.angle.hour2str(clock_now-obs_begin),
            s=common.angle.hour2str(obs_begin),
            f=common.angle.hour2str(clock_now)),
        "Estimate airmass: {me:5.3f}+-{st:5.3f}, range: {mi:4.2f} -> {ma:4.2f}".format(
            me=np.mean(exp_airmass), st=np.std(exp_airmass),
            mi=np.min(exp_airmass), ma=np.max(exp_airmass)),
        "Big move over {bmove} deg: {njump} jump(s),".format(njump=njump, bmove=site.bmove),
        "SKIP: {sc} session(s) encounted, {st} wasted.".format(
            sc=skip_count, st=common.angle.hour2str(skip_total))],
        title="Summary") )

    # plot task map of this night
    plotmap.plotmap(ara, ade, np.array([f.tag for f in afields]),
        title="{tel} {days} ({mjd})".format(tel=tel, days=daystr, mjd=mjd18),
        epsfile=eps_fn, pngfile=png_fn,
        mpos=(mpos.ra, mpos.dec), spos=(spos.ra, spos.dec) )

    # closing report and summary
    rep_end_time = Time.now()
    tea(rep_f, "\n --------======== End : {} ========--------\n{:.1f} seconds used.\n".format(
        rep_end_time.iso, (rep_end_time - rep_start_time).sec))
    rep_f.close()

    # call collect to finish simulation
    if simulate :
        collect.collect(tel, yr, mn, dy, run)
# Flat plotting script: reads a "particleNumber" diagnostic via collect()
# and plots normalized particle counts. `np`, `plt`, and `yformatter` are
# presumably provided by `from template import *` — TODO confirm.
from template import *
from collect import collect

t = 0
# unit is us
dt = 0.5e-12 * 500000 / 1.0e-6
N0 = 1.0e5        # normalization for particle number
pFlux0 = 1.0e23   # NOTE(review): unused in the visible portion of this script
hFlux0 = 1.0e7    # NOTE(review): unused in the visible portion of this script

# first read is only used to size the time axis
val1 = collect("/Diagnostic/", "particleNumber")
nx = (val1.shape)[0]
x = np.linspace(0, nx * dt, nx)

## initialize the matplotlib figure
fig = plt.figure(figsize=(10, 8))
fig.subplots_adjust(top=0.9, bottom=0.1, wspace=0.6, hspace=0.55)

#============Total particle number=======================================
# NOTE(review): collect() is called twice more with identical arguments;
# only the last index [..., 0] vs [..., 1] differs — possibly redundant reads.
val1 = collect("/Diagnostic/", "particleNumber")
val1 = val1 / N0
val1_1d = np.transpose(val1[:, 0, 0, 0])

val2 = collect("/Diagnostic/", "particleNumber")
val2 = val2 / N0
val2_1d = np.transpose(val2[:, 0, 0, 1])

ax0 = fig.add_subplot(3, 1, 1)
ax0.yaxis.set_major_formatter(yformatter)
def main():
    """\
    Script for reading Gaussian input and output files.
    """
    from argparse import ArgumentParser, RawDescriptionHelpFormatter
    from textwrap import dedent

    # Command-line interface
    parser = ArgumentParser(description=dedent(main.__doc__),
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('--version', action='version', version='%(prog)s 1.0')
    parser.add_argument('files', help='The files that you want converted.',
                        nargs='+')
    #parser.add_argument('-cb', '--changebond', help='Change bonds in the molecule.')
    #parser.add_argument('-ca', '--changeangle', help='Change angles in the molecule.')
    #parser.add_argument('-cd', '--changeangle', help='Change angles in the molecule.')
    parser.add_argument('-ch', '--charges', help='Output charges.',
                        action='store_true', default=False)
    parser.add_argument('-e', '--energies', help='Output energies.',
                        action='store_true', default=False)
    parser.add_argument('-c', '--coords', help='Output coordinates.',
                        action='store_true', default=False)
    parser.add_argument('-f', '--forces', help='Output forces.',
                        action='store_true', default=False)
    parser.add_argument('-tsc', '--tscoords',
                        help='Output coordinates from transition state search.',
                        action='store_true', default=False)
    parser.add_argument('-u', '--units', help='Units for outputting quantities.',
                        default='au')
    parser.add_argument('-p', '--plot', help='Use this to make a plot of data.',
                        action='store_true', default=False)
    parser.add_argument('-v', '--variable',
                        help='Choose the variable for a plot (x-axis) or a dihedral scan.',
                        default=None)
    parser.add_argument('-ds', '--dihedralscan',
                        help='Choose the min, max, and stride for a dihedral scan.',
                        nargs=3, required=False)
    parser.add_argument('-ad', '--atomicdipole',
                        help='Output the atomic dipoles from Hirshfeld analysis.',
                        action='store_true', default=False)
    parser.add_argument('-ap', '--atomicpol',
                        help='Output the atomic polarizabilities.',
                        action='store_true', default=False)
    parser.add_argument('-fe', '--fitevb', help='Output energy data for FitEVB',
                        action='store_true', default=False)
    parser.add_argument('-ba', '--bondedatoms', help='Read in bonded atoms',
                        nargs=2, required=False)
    args = parser.parse_args()

    # Store command line arguments as convenient variables
    fh = args.files
    charges = args.charges
    energies = args.energies
    coords = args.coords
    forces = args.forces
    tscoords = args.tscoords
    units = args.units
    plot = args.plot
    var = None
    atomdip = args.atomicdipole
    atompol = args.atomicpol
    if args.variable is not None:
        var = args.variable.upper()
    if args.dihedralscan is not None:
        dscan = [ float(args.dihedralscan[0]),
                  float(args.dihedralscan[1]),
                  float(args.dihedralscan[2]) ]
    else:
        dscan = []
    fitevb = args.fitevb
    bondedatoms = args.bondedatoms

    # -p/-ds require -v so we know which variable to scan/plot
    if (plot == True and var == None) or (dscan != [] and var == None):
        sys.exit('Must use -p and -v flags or -ds and -v flags together!')

    # Options for plotting
    # BL = bond length
    # AN = angle
    # DH = dihedral
    plot_opts = ( 'BL1', 'BL2', 'BL3', 'BL4', 'BL5', 'BL6', 'BL7',
                  'AN1', 'AN2', 'AN3', 'AN4',
                  'DH1', 'DH2', 'DH3' )

    # Check if the requested plot option makes sense
    if plot:
        if var not in plot_opts:
            sys.exit('Cannot print ' + var + '! Must choose a bond length, angle, or dihedral!')
        # Determine the type of data desired
        if 'BL' in var:
            xdata = 'BONDS'
        elif 'AN' in var:
            xdata = 'ANGLES'
        elif 'DH' in var:
            xdata = 'DIHEDRALS'

    # Make lists for storing data
    x = []
    y = []

    # Variables for storing all quantities related to atomic polarizabilities
    if atompol:
        # Electric fields
        ff = []
        # Atomic dipoles
        ad = []

    # Energy label conversion dictionary
    table = { 'SCF'  : 'SCF Energy',
              'CORR' : 'Correlation Energy',
              'MP2'  : 'MP2 Energy',
              'PCM'  : 'Total Energy + PCM', }

    # Unit conversion dictionary
    conv = { 'au'      : ['Hartree' ,      1.00000000],
             'ev'      : ['eV'      ,     27.21138505],
             'kcalmol' : ['kcal/mol',    627.50947428],
             'kjmol'   : ['kJ/mol'  ,   2625.49964038],
             'wvn'     : ['cm^{-1}' , 219474.63068 ], }

    # Conversion between Bohr and Angstroms
    au2angstrom = 0.52917720859

    # Dictionary for converting plot input to code input
    # NOTE(review): plot_opts allows BL6/BL7 but pdata has no entry for them —
    # those options would raise KeyError later; verify intended coverage.
    pdata = { 'BL1' : 0, 'BL2' : 1, 'BL3' : 2, 'BL4' : 3, 'BL5' : 4,
              'AN1' : 0, 'AN2' : 1, 'AN3' : 2, 'AN4' : 3,
              'DH1' : 0, 'DH2' : 1, 'DH3' : 2, }

    # /\/\/\/\/\/\/\/\/
    # Collect the files
    # /\/\/\/\/\/\/\/\/

    for f in fh:
        data = collect(f)

        # /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\
        # Perform the action requested by the user
        # /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\

        if data['FILETYPE'] == 'G09_INPUT':
            if coords:
                atoms = data['ATOMS']
                # NOTE(review): this rebinds the boolean flag `coords` to the
                # coordinate list — the flag is unusable afterwards; confirm.
                coords = data['COORDS']
                bonds = data['BONDS']
                angles = data['ANGLES']
                dihedrals = data['DIHEDRALS']

            # Perform variable scan
            if dscan != []:
                # Equilibrium dihedral
                deq = dihedrals[pdata[var]]
                # Build up lists of dihedrals
                lscan = dscan[0]
                hscan = dscan[1]+dscan[2]
                step = dscan[2]
                pd = []
                md = []
                for d in np.arange(deq, deq+hscan, step):
                    pd.append(d)
                for d in np.arange(lscan, deq, step):
                    md.append(d)
                d = md + pd
                # Print the files in the positive direction; one Z-matrix input
                # per scanned dihedral value, named e.g. "12pt5_dh1.g09"
                for num in d:
                    string = str(num).split('.')
                    if num < 0.0:
                        string[0] = string[0].replace('-', 'm')
                    fname = string[0] + 'pt' + string[1] + '_' + var.lower()
                    fname += '.g09'
                    fd = open(fname, 'w')
                    for item in data['JOBTYPE']:
                        print(item, file=fd)
                    print('', file=fd)
                    print(data['TITLE'], file=fd)
                    print('', file=fd)
                    print(data['CHARGE_MULT'], file=fd)
                    # Z-matrix line templates: bond / angle / dihedral rows
                    bline = '{0:2} {1:1} {2:8.6f}'
                    aline = '{0:2} {1:1} {2:8.6f} {3:1} {4:8.4f}'
                    dline = '{0:2} {1:1} {2:8.6f} {3:1} {4:8.4f} {5:1} {6:9.4f}'
                    for i in range(data['NATOMS']):
                        if i == 0:
                            print(atoms[i], file=fd)
                        elif i == 1:
                            print(bline.format(atoms[i], coords[i][0], bonds[i-1]), file=fd)
                        elif i == 2:
                            print(aline.format(atoms[i], coords[i][0], bonds[i-1],
                                               coords[i][2], angles[i-2]), file=fd)
                        else:
                            # Substitute the scanned value only for the chosen dihedral
                            if i-3 == pdata[var]:
                                print(dline.format(atoms[i], coords[i][0], bonds[i-1],
                                                   coords[i][2], angles[i-2],
                                                   coords[i][4], num), file=fd)
                            else:
                                print(dline.format(atoms[i], coords[i][0], bonds[i-1],
                                                   coords[i][2], angles[i-2],
                                                   coords[i][4], dihedrals[i-3]), file=fd)
                    print('', file=fd)
                    print('', file=fd)
                    fd.close()
        elif data['FILETYPE'] == 'G09_OUTPUT':
            # Print energies
            if energies:
                en = data['ENERGY']
                if plot:
                    if data['BONDS'] == []:
                        sys.exit('Must use file that has coordinates in Z-matrix format!')
                    else:
                        # For FitEVB, we need data in the form of data IDs, rather than
                        # proton transfer coordinate lengths.
                        if fitevb:
                            tmp = f.split('.')[0].upper()
                            x.append(tmp)
                        else:
                            x.append(data[xdata][pdata[var]])
                        # Determine what the y-axis quantity is
                        if 'PCM' in en.keys():
                            y.append(en['PCM']*conv[units][1])
                        elif 'MP2' in en.keys():
                            y.append(en['MP2']*conv[units][1])
                        else:
                            y.append(en['SCF']*conv[units][1])
                else:
                    # Print the data
                    fmt = '{0:>18} {1:20.8f}'
                    fname = data['FILENAME']
                    txt = 'Output data for file: ' + fname
                    print()
                    print('='*len(txt))
                    print(txt)
                    print('='*len(txt))
                    print()
                    print('Data is output in units of: ', conv[units][0])
                    print()
                    print(fmt.format(table['SCF'], data['ENERGY']['SCF']*conv[units][1]))
                    # Print post-HF energy
                    if 'MP2' in data['ENERGY'].keys():
                        print(fmt.format(table['MP2'], data['ENERGY']['MP2']*conv[units][1]))
                        print(fmt.format(table['CORR'], data['ENERGY']['CORR']*conv[units][1]))
                    # Print PCM energy
                    if 'PCM' in data['ENERGY'].keys():
                        print(fmt.format(table['PCM'], data['ENERGY']['PCM']*conv[units][1]))
                    print()
            # Print coordinates
            elif coords:
                fname = f.split('.')[0]
                output = fname + '.xyz'
                foutput = open(output, 'w')
                fmt = '{0:4} {1:10.6f} {2:10.6f} {3:10.6f}'
                print(data['NATOMS'], file=foutput)
                print('', file=foutput)
                for i in range(data['NATOMS']):
                    print(fmt.format(data['ATOMS'][i], data['CART_COORDS'][i][0],
                                     data['CART_COORDS'][i][1],
                                     data['CART_COORDS'][i][2]), file=foutput)
                foutput.close()
            # Print forces
            elif forces:
                fname = f.split('.')[0].upper()
                output = 'REF_' + fname
                foutput = open(output, 'w')
                fmt = '{0:4} {1:12.6f} {2:12.6f} {3:12.6f}'
                # Conversion between Bohr and Angstroms
                au2angstrom = 0.52917720859
                # Conversion between Hartree and kcal/mol
                hart2kcalmol = 627.50947428
                for i in range(data['NATOMS']):
                    print(fmt.format( i+1,
                        data['FORCES'][i][0] * hart2kcalmol / au2angstrom,
                        data['FORCES'][i][1] * hart2kcalmol / au2angstrom,
                        data['FORCES'][i][2] * hart2kcalmol / au2angstrom),
                        file=foutput)
                foutput.close()
            ## Print coordinates from a transition state search
            #elif tscoords:
            #    output = 'tscoordinates.xyz'
            #    foutput = open(output, 'a')
            #    fmt = '{0:4} {1:10.6f} {2:10.6f} {3:10.6f}'
            #    print(data['NATOMS'], file=foutput)
            #    print('', file=foutput)
            #    for i in range(data['NATOMS']):
            #        print(fmt.format(data['ATOMS'][i], data['CART_COORDS'][i][0],
            #                         data['CART_COORDS'][i][1], data['CART_COORDS'][i][2]), file=foutput)
            #    foutput.close()
            # Print atomic dipoles
            elif atomdip:
                field = data['FIELD']
                # NOTE(review): rebinds `ad` (also used below as the atompol
                # accumulator list) — only safe because -ad and -ap branches
                # are mutually exclusive per file; verify.
                ad = data['ATOMIC_DIPOLE']['HIRSHFELD']
                natoms = data['NATOMS']
                atoms = data['ATOMS']
                fmthead = '{0} {1:6.3f} {2:6.3f} {3:6.3f}'
                print()
                print(fmthead.format('External field (a.u.) =', field[0], field[1], field[2]))
                print()
                fmtdip = '{0:2} {1:9.6f} {2:9.6f} {3:9.6f}'
                print('           X         Y         Z')
                for i in range(natoms):
                    print(fmtdip.format(atoms[i], ad[i][0], ad[i][1], ad[i][2]))
                print()
            # Store values for atomic polarizabilities
            elif atompol:
                ff.append(data['FIELD'])
                ad.append(data['ATOMIC_DIPOLE']['HIRSHFELD'])
                natoms = data['NATOMS']
                atoms = data['ATOMS']
            elif charges:
                # Types of charges
                charge_types = { 'Merz-Singh-Kollman' : 'Merz-Kollman',
                                 'Mulliken'           : 'Mulliken', }
                for item in data['CHARGE_MODEL'].keys():
                    ch_type = charge_types[item]
                    # Stash data in a convenient way
                    chg = data['CHARGE_MODEL'][item]
                    atoms = data['ATOMS']
                    # Heading
                    print()
                    print(ch_type + ' charges')
                    print()
                    fmtchg = '{0:2} {1:9.6f}'
                    for i in range(data['NATOMS']):
                        print(fmtchg.format(atoms[i], chg[i]))
                    print()
        else:
            sys.exit('File must be a G09 input or output file')

    if plot:
        # Plot data depending on the user's request.
        # Set to render text with LaTeX, edit font properties, define figure
        # Write data to file
        if fitevb:
            fh = open('ENERGY', 'w')
            fmt0 = '{0} {1}'
            fmt = '{0} {1:20.8f}'
            # The configuration with the minimum energy defines the zero point
            print(fmt0.format('ZERO', x[y.index(min(y))]), file=fh)
            for i in range(len(x)):
                print(fmt.format(x[i], y[i]), file=fh)
        else:
            fh = open('potential.data', 'w')
            fmt = '{0:12.6f} {1:20.8f}'
            for i in range(len(x)):
                print(fmt.format(x[i], y[i]), file=fh)

        # x-label and fitting information for bonds and angles;
        # these are spliced into the generated script as Python literals
        boa = 'False'
        angle = 'False'
        if xdata == 'BONDS':
            xlabel = r'$r-r_{eq}$ (\AA)'
            boa = 'True'
        elif xdata == 'ANGLES':
            xlabel = r'$\theta-\theta_{eq}$ (Degrees)'
            boa = 'True'
            angle = 'True'
        elif xdata == 'DIHEDRALS':
            xlabel = r'$\phi$ (Degrees)'

        # Define units for y-axis
        yunit = conv[units][0]

        # Template for a standalone plotting script; {boa}/{angle}/{xlabel}/{yunit}
        # are substituted via str.replace below (not str.format, so the literal
        # '{0:17}'-style specs inside survive untouched)
        string = dedent('''\
        import matplotlib.pyplot as plt
        from matplotlib.ticker import MultipleLocator
        import numpy as np
        from scipy import stats

        plt.rc('text', usetex=True)
        plt.rc('font', **{'family':'serif', 'serif': ['Times'], 'size': 30})

        #*********************************************************
        # Fill this in if you want values in terms of displacement
        #*********************************************************
        eq = 0.0000

        # Load the data
        fh = 'potential.data'
        x,y = np.loadtxt(fh,usecols=(0,1),unpack=True)
        x = x - eq
        y = y - min(y)

        if {boa}:
            x2 = np.linspace(min(x), max(x), 100)
            y2 = np.polyfit(x, y, 2)
            y3 = np.poly1d(y2, variable='x')
            # y3 is the formula of the parabola in the harmonic approach
            print(y3)
            if {angle}:
                # Convert the force constant to radians and print it
                deg2rad = np.pi/180.0
                ktheta = y3.c[0]/(deg2rad*deg2rad)
                print('{0:17} {1:7.4f}'.format('Force constant = ', ktheta))
            # Coefficients
            # x-axis and y-axis crossing points
            xmin = -y3.c[1]/(2.0*y3.c[0])
            ymin = y3.c[0]*xmin*xmin + y3.c[1]*xmin + y3.c[2]
            print('{0:13} {1:7.4f}'.format('x-axis min = ', xmin))
            print('{0:13} {1:7.4f}'.format('y-axis min = ', ymin))
            # Determine the r-squared value for the fit
            y4 = []
            for i in range(len(x)):
                y4.append(y3(x[i]))
            slope, intercept, r_value, p_value, std_err = stats.linregress(y,y4)
            print('{0:12} {1:6.4f}'.format('R-squared = ', r_value*r_value))

        # Make a plot
        fig = plt.figure()
        sub = fig.add_subplot(111)
        sub.plot(x, y, 'b', marker='o', linewidth=0)
        if {boa}:
            sub.plot(x2, y3(x2), 'g', linewidth=2)
        fig.subplots_adjust(left=0.12, right=0.90, bottom=0.1, top=0.9)

        # Title, labels
        sub.set_xlabel(r'{xlabel}')
        # Define units for y-axis
        lab = 'Energy (' + '{yunit}' + ')'
        sub.set_ylabel(lab)

        # Axis limits
        sub.set_xlim([round(min(x),3),round(max(x),3)])
        plt.show()\
        ''')
        string = string.replace('{xlabel}', xlabel)
        string = string.replace('{yunit}', yunit)
        string = string.replace('{boa}', boa)
        string = string.replace('{angle}', angle)
        dataplot = open('plot_potential.mpl.py', 'w')
        print(string, file=dataplot)

    if atompol:
        # Because the systems are spherical, we only need to account for
        # the diagonal terms of the polarizability

        # Magnitude of the finite field
        mff = []
        for f in ff:
            tmp = f[0]*f[0] + f[1]*f[1] + f[2]*f[2]
            tmp = np.sqrt(tmp)
            mff.append(tmp)

        # Get the zero electric field atomic dipoles; `s` marks the first
        # finite-field entry (assumes the zero-field file comes first)
        for i in range(len(mff)):
            lf = mff[i]
            if lf == 0.0:
                zfdip = ad[i]
                s = i + 1

        # Determine the polarizability (finite difference of dipole vs field)
        ap = [[0.0, 0.0, 0.0] for i in range(natoms)]
        for i in range(s,len(mff)):
            for a in range(natoms):
                if ff[i][0] != 0.0:
                    ap[a][0] = ( ad[i][a][0] - zfdip[a][0] ) / ff[i][0]
                elif ff[i][1] != 0.0:
                    ap[a][1] = ( ad[i][a][1] - zfdip[a][1] ) / ff[i][1]
                elif ff[i][2] != 0.0:
                    ap[a][2] = ( ad[i][a][2] - zfdip[a][2] ) / ff[i][2]

        # Determine the polarizability average
        # NOTE(review): this computes (axx^2+ayy^2+azz^2)/3 — the mean of
        # SQUARED components, not the usual isotropic average
        # (axx+ayy+azz)/3; confirm this is intentional.
        pol_avg = []
        for i in range(natoms):
            tmp = ap[i][0]*ap[i][0] + ap[i][1]*ap[i][1] + ap[i][2]*ap[i][2]
            tmp *= 1.0 / 3.0
            pol_avg.append(tmp)

        # Print average polarizabilities
        fmt = '{0:2} {1:6.2f}'
        print()
        print('Atomic polarizabilities in a.u.')
        print()
        for i in range(natoms):
            print(fmt.format(atoms[i], pol_avg[i]))
        print()
def main():
    """\
Script for reading a CP2K output (*.out).
"""
    from argparse import ArgumentParser, RawDescriptionHelpFormatter
    from textwrap import dedent
    parser = ArgumentParser(description=dedent(main.__doc__),
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('--version', action='version', version='%(prog)s 1.0')
    parser.add_argument('-e', '--energies', help='Output energies.',
                        action='store_true', default=False)
    parser.add_argument('-p', '--plot', help='Use this to make a plot of data.',
                        action='store_true', default=False)
    parser.add_argument('-u', '--units', help='Units for outputting quantities.',
                        default='au')
    parser.add_argument('-b', '--bondatoms', help='The atoms in the bond for a PT scan.'
                        ' These should be written in a "# #" pair.', required=False)
    parser.add_argument('files', help='The files that you want analyzed.',
                        nargs='+')
    args = parser.parse_args()

    # Store command line arguments as convenient variables
    fh = args.files
    energy = args.energies
    plot = args.plot
    units = args.units
    if args.bondatoms is not None:
        # Expect a '# #' pair of 1-based atom indices for the PT-scan bond.
        tmp = args.bondatoms.split()
        bond = [int(tmp[0]), int(tmp[1])]
    else:
        bond = None
    if plot and bond is None:
        sys.exit('Need to specify the plot and bond flags together.')

    # Unit conversion dictionary: key -> [axis label, factor from Hartree]
    conv = {
        'au'      : ['Hartree' ,      1.00000000],
        'ev'      : ['eV'      ,     27.21138505],
        'kcalmol' : ['kcal/mol',    627.50947428],
        'kjmol'   : ['kJ/mol'  ,   2625.49964038],
        'wvn'     : ['cm^{-1}' , 219474.63068   ],
    }

    # /\/\/\/\/\/\/\/\/
    # Collect the files
    # /\/\/\/\/\/\/\/\/

    if energy:
        e = None        # QM total energies, one scalar per file
        e_qmmm = None   # QM/MM total energies; stays None for pure QM runs
        if plot:
            x = []      # bond distances for the PT scan (plot x-axis)

    for f in fh:
        # Collect all data into memory for data retention
        data = collect(f)

        # Output data
        if energy:
            if plot:
                # This is currently designed to work for proton transfer
                # scans.  Get the bond distance from the matching .xyz file.
                fxyz = f.split('.')[0] + '.xyz'
                xyz = collect(fxyz)
                coords1 = xyz['COORDS'][bond[0]-1]
                coords2 = xyz['COORDS'][bond[1]-1]
                d = np.sqrt(  ( coords2[0] - coords1[0] ) ** 2
                            + ( coords2[1] - coords1[1] ) ** 2
                            + ( coords2[2] - coords1[2] ) ** 2 )
                x.append(d)
                # Store the energies.  NOTE: always append scalars here; the
                # original appended a [energy, 0.0] list in the pure-QM case,
                # which later crashed the '{1:12.8f}' formatting of e[i].
                if e is None:
                    e = []
                e.append( data['ENERGIES']['QM_TOTAL_ENERGY']*conv[units][1] )
                if 'QMMM_TOTAL_ENERGY' in data['ENERGIES']:
                    # Initialize e_qmmm independently of e so a mixed set of
                    # QM and QM/MM outputs cannot append to a None list.
                    if e_qmmm is None:
                        e_qmmm = []
                    e_qmmm.append( data['ENERGIES']['QMMM_TOTAL_ENERGY']*conv[units][1] )
            else:
                # Table of energy term meanings
                table = {
                    'OVERLAP_ENERGY' : 'Overlap energy of the core charge distribution:',
                    'CORE_SELF_ENERGY' : 'Self energy of the core charge distribution:',
                    'CORE_HAMILTONIAN' : 'Core Hamiltonian energy:',
                    'COULOMB_ENERGY' : 'Coulomb energy:',
                    'DFT_XC_ENERGY' : 'Exchange-correlation energy:',
                    'HFX_ENERGY' : 'Hartree-Fock Exchange energy:',
                    'DISPERSION_ENERGY' : 'Dispersion energy:',
                    'QMMM_ELECTROSTATIC_ENERGY' : 'QM/MM Electrostatic energy:',
                    'QM_TOTAL_ENERGY' : 'Total QM energy:',
                    'QMMM_TOTAL_ENERGY' : 'Total QM/MM energy:',
                }
                # Print the data
                fmt = '{0:>47} {1:20.8f}'
                fname = data['FILENAME']
                txt = 'Output data for file: ' + fname
                print()
                print('='*len(txt))
                print(txt)
                print('='*len(txt))
                print()
                print('Data is output in units of: ', conv[units][0])
                print()
                print(fmt.format(table['OVERLAP_ENERGY'],
                                 data['ENERGIES']['OVERLAP_ENERGY']*conv[units][1]))
                print(fmt.format(table['CORE_SELF_ENERGY'],
                                 data['ENERGIES']['CORE_SELF_ENERGY']*conv[units][1]))
                print(fmt.format(table['CORE_HAMILTONIAN'],
                                 data['ENERGIES']['CORE_HAMILTONIAN']*conv[units][1]))
                print(fmt.format(table['COULOMB_ENERGY'],
                                 data['ENERGIES']['COULOMB_ENERGY']*conv[units][1]))
                print(fmt.format(table['DFT_XC_ENERGY'],
                                 data['ENERGIES']['DFT_XC_ENERGY']*conv[units][1]))
                # Optional terms: only present for some calculation types, so
                # guard each key the same way HFX/dispersion were guarded.
                if 'HFX_ENERGY' in data['ENERGIES']:
                    print(fmt.format(table['HFX_ENERGY'],
                                     data['ENERGIES']['HFX_ENERGY']*conv[units][1]))
                if 'DISPERSION_ENERGY' in data['ENERGIES']:
                    print(fmt.format(table['DISPERSION_ENERGY'],
                                     data['ENERGIES']['DISPERSION_ENERGY']*conv[units][1]))
                if 'QMMM_ELECTROSTATIC_ENERGY' in data['ENERGIES']:
                    print(fmt.format(table['QMMM_ELECTROSTATIC_ENERGY'],
                                     data['ENERGIES']['QMMM_ELECTROSTATIC_ENERGY']*conv[units][1]))
                print(fmt.format(table['QM_TOTAL_ENERGY'],
                                 data['ENERGIES']['QM_TOTAL_ENERGY']*conv[units][1]))
                if 'QMMM_TOTAL_ENERGY' in data['ENERGIES']:
                    print(fmt.format(table['QMMM_TOTAL_ENERGY'],
                                     data['ENERGIES']['QMMM_TOTAL_ENERGY']*conv[units][1]))
                print()

    if energy and plot:
        # Make a file containing the data: distance, QM energy, QM/MM energy
        # (third column is 0.0 when the outputs were not QM/MM runs).
        fmt = '{0:12.8f} {1:12.8f} {2:12.8f}'
        fname = 'ptcoord.data'
        with open(fname, 'w') as fdata:
            for i in range(len(x)):
                if e_qmmm is not None:
                    print(fmt.format( x[i], e[i], e_qmmm[i] ), file=fdata)
                else:
                    print(fmt.format( x[i], e[i], 0.0 ), file=fdata)

        # For plotting the data
        # X-axis label
        xlabel = r'$R$ (\AA)'
        # Define units for y-axis
        yunit = conv[units][0]
        # Template of a standalone matplotlib script; {xlabel}/{yunit} are
        # placeholders substituted below with str.replace (NOT str.format,
        # because the template itself contains literal braces).
        string = dedent('''\
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
import numpy as np
from scipy import stats

plt.rc('text', usetex=True)
plt.rc('font', **{'family':'serif', 'serif': ['Times'], 'size': 30})

# Load the data
fh = 'ptcoord.data'
x,y = np.loadtxt(fh,usecols=(0,1),unpack=True)
y = y - min(y)
x2,y2 = np.loadtxt(fh,usecols=(0,2),unpack=True)
y2 = y2 - min(y2)

# Make a plot
fig = plt.figure()
sub = fig.add_subplot(111)
sub.plot(x, y, 'b', marker='o', linewidth=1)
sub.plot(x2, y2, 'g', marker='o', linewidth=1)
fig.subplots_adjust(left=0.12, right=0.90, bottom=0.1, top=0.9)

# Title, labels
sub.set_xlabel(r'{xlabel}')
# Define units for y-axis
lab = 'Energy (' + '{yunit}' + ')'
sub.set_ylabel(lab)

# Axis limits
sub.set_xlim([round(min(x),3),round(max(x),3)])

plt.show()\
''')
        string = string.replace('{xlabel}', xlabel)
        string = string.replace('{yunit}', yunit)
        with open('plot_potential.mpl.py', 'w') as dataplot:
            print(string, file=dataplot)
itime = 9 if len(sys.argv) == 2: t = int(sys.argv[1]) elif len(sys.argv) > 2: print("error: should have one argument, as time step") else: t = itime angle = 5.0 * math.pi / 180.0 bevel_depth_unit = 0.5 * math.tan(angle) x_step = 20 level_num = 30 path = "ref/data" val = collect("/Fields/", "Phi_global_avg", path=path) val_2d = val[t, 0, :, :] nx = val_2d.shape[0] ny = val_2d.shape[1] dx = 0.35e-2 dy = 0.35e-2 x, y = np.mgrid[slice(0.0, dx * (nx - 0.5), dx), slice(0.0, dy * ny, dy)] #x, y=np.meshgrid(np.arange(0.0,dx*nx,dx), np.arange(0.0,dy*ny,dy)) print("shape of x, y: ", x.shape, y.shape) print("nx, ny: ", nx, ny) xmin = 1.0 #(0.5 * nx - 200) * dx xmax = 2.5 #(0.5 * nx + 200) * dx