def test_literals():
    """Literal-text matching, %% escapes, and search mode."""
    check = scanf_star
    raw = scanf.scanf
    assert check("The number is: %d", "The number is: 52") == (52,)
    # "is:" never occurs literally here, so even search=True matches nothing.
    assert check("is: %d", "The number is 52", search=True) is None
    assert check("is: %d", "The number is: \n 52", search=True) == (52,)
    # %% in a format matches a literal percent sign.
    assert raw("%d%% / %d", "80% / 20%") == (80, 20)
    assert raw("%d%%s / %d", "80%s / 20%") == (80, 20)
    assert raw("^%s", "^caret")[0] == "caret"
def loadmaze(filepath):
    """Load a maze from a data file.

    File layout: first line is "n,m" (row count, column count), followed by
    n lines of m comma-separated single digits, then two "x,y" lines giving
    the entrance and the exit.

    :param filepath: path of the maze data file.
    :return: a Maze built from the grid, entrance and exit.
    :raises ValueError: when a grid row does not contain exactly m columns.
    """
    with open(filepath, mode="r") as file:
        line = file.readline().strip()
        n, m = scanf.scanf("%d,%d", line)
        # NOTE: file rows/cols are transposed w.r.t. screen coords (x, y):
        # matrix[i, j] -> i is the row (acts as y), j the column (acts as x).
        maze = np.zeros((m, n), dtype='int')
        # range-for replaces the original manual while/increment loop.
        for y in range(n):
            datas = file.readline().strip().split(",")
            if len(datas) != m:
                raise ValueError(f"第{y+1}行迷宫数据有误,应该有{m}列,但实际有{len(datas)}列")
            for x in range(m):
                maze[x, y] = ord(datas[x]) - ord('0')
        entrance = scanf.scanf("%d,%d", file.readline().strip())
        # Renamed from `exit`, which shadowed the builtin; also stop reusing
        # `m` (the column count) for the Maze result.
        exit_pos = scanf.scanf("%d,%d", file.readline().strip())
        return Maze(maze, entrance, exit_pos)
def encontrarPadrao(arquivo):
    """Extract date, period, lote/medidor ids and frequency range from a
    measurement file path by matching each path segment against patterns.

    Patterns, in order: "%d-%d-%d" (date), "%s" (period),
    "L%s_M%s" (lote + medidor), "%s_%f-%f" (comment + frequency range).

    NOTE(review): the extracted values are only bound to locals and the
    function returns None — confirm whether a return/DB write was intended.
    """
    caminho = list(re.split('/', arquivo))[1:]
    padrao = ["%d-%d-%d", "%s", "L%s_M%s", "%s_%f-%f"]
    data = None
    medidor = None
    lote = None
    fMinimo = None
    fMaximo = None
    tipo = None
    periodo = None
    for subcaminho in caminho:
        for contador, subpadrao in enumerate(padrao):
            # Parse once and reuse (the original called scanf twice per hit).
            dados = scanf(subpadrao, subcaminho)
            if not dados:
                continue
            if contador == 0:
                # Segment looks like "dd-mm-yyyy".
                data = datetime.date(dados[2], dados[1], dados[0])
            if contador == 1:
                temp = dados[0]
                if temp == "manha" or temp == "tarde" or temp == "noite":
                    periodo = temp
            if contador == 2:
                lote = dados[0]
                medidor = dados[1]
            if contador == 3:
                comentario = dados[0]
                fMinimo = dados[1]
                fMaximo = dados[2]
def read(filename):
    """Load the gyro columns (gx, gy, gz) from an env_movements data file."""
    gx, gy, gz = [], [], []
    with open("../env_movements/" + filename + ".txt") as data:
        # First line holds the sample frequency; it must be consumed even
        # though this reader does not return it.
        freq = scanf("%f", data.readline())
        for line in data:
            row = scanf("%f %f %f %f %f %f %f", line)
            # Columns: time, ax, ay, az, gx, gy, gz — keep only the gyro.
            gx.append(row[4])
            gy.append(row[5])
            gz.append(row[6])
    return gx, gy, gz
def scanf_star(fmt, data, *args, **kw):
    """Run a scanf test and additionally verify the '%*' (suppressed
    assignment) variant of the same format against the same data.
    """
    parsed = scanf.scanf(fmt, data, *args, **kw)
    if parsed is None:
        return parsed
    # Suppressing every conversion must still match, yielding no values.
    suppressed = fmt.replace('%', '%*')
    assert scanf.scanf(suppressed, data, *args, **kw) == ()
    if "%r" not in fmt:
        # A trailing %r "rest" capture must agree between the two forms.
        with_rest = scanf.scanf(fmt + r"%r", data, *args, **kw)
        rest_alone = scanf.scanf(suppressed + r"%r", data, *args, **kw)
        assert with_rest[-1] == rest_alone[0]
    return parsed
def main(raw_input):
    """AoC 2020 day 16: ticket translation.

    raw_input holds three sections: field rules, "your ticket" and
    "nearby tickets". Returns (part1, part2): the sum of values valid
    under no rule, and the product of this ticket's "departure*" fields.
    """
    # Parse "name: a-b or c-d" rules (spaces removed so %s stops at ':').
    rule_dict = {}
    for line in raw_input.split("\n\n")[0].strip().splitlines():
        name, a, b, c, d = scanf.scanf("%s:%d-%dor%d-%d", line.replace(" ", ""))
        rule_dict[name] = [a, b, c, d]
    my_ticket = [
        int(a) for a in scanf.scanf("your ticket:\n%s", raw_input)[0].split(",")
    ]
    nearby_ticket_list = [[
        int(a) for a in b.split(",")
    ] for b in raw_input.split("nearby tickets:")[1].strip().splitlines()]
    valid_ticket_list = [my_ticket]
    part1 = 0
    # Part 1: a ticket value is invalid if it matches no rule at all.
    for ticket in nearby_ticket_list:
        match_ticket = True
        for value in ticket:
            if not any([match(rule, value) for rule in rule_dict.values()]):
                part1 += value
                match_ticket = False
        if match_ticket:
            valid_ticket_list.append(ticket)
    # For each rule, collect every column index it could describe.
    ticket_match_dict = {}
    for name, rule in rule_dict.items():
        ticket_match_dict[name] = []
        for index in range(len(my_ticket)):
            if all(
                    [match(rule, ticket[index]) for ticket in valid_ticket_list]):
                ticket_match_dict[name].append(index)
    # Constraint propagation: a rule with exactly one candidate column
    # claims it, and that column is removed from every other rule's list
    # (the candidate lists are mutated in place while iterating the dict).
    finished_fields = set()
    while len(finished_fields) != len(ticket_match_dict):
        for key, value in ticket_match_dict.items():
            if len(value) == 1 and key not in finished_fields:
                finished_fields.add(key)
                for other_key, other_value in ticket_match_dict.items():
                    if key != other_key and value[0] in other_value:
                        other_value.remove(value[0])
    part2 = 1
    for key, value in ticket_match_dict.items():
        if key.startswith("departure"):
            part2 *= my_ticket[value[0]]
    return part1, part2
def parse(strMsg):
    """Parse a "$: f, f, f, f, f, f, f, f" telemetry line.

    :param strMsg: raw message string.
    :return: (time, lat, lon, alt, vel, roll_rad, pitch_rad, heading_rad);
        every element is None when the line does not match the pattern.
    """
    pattern = "$: %f, %f, %f, %f, %f, %f, %f, %f"
    time = lat = lon = alt = vel = roll_rad = pitch_rad = heading_rad = None
    try:
        result = scanf(pattern, strMsg)
        print(result)  # py3-compatible (original used py2 `print result`)
        # scanf returns None on a non-matching line; unpacking then raises
        # and leaves every field as None.
        time, lat, lon, alt, vel, roll_rad, pitch_rad, heading_rad = result
    except (TypeError, ValueError):
        # Narrowed from a bare `except:` that swallowed everything.
        pass
    return time, lat, lon, alt, vel, roll_rad, pitch_rad, heading_rad
def read(filename):
    """Read the sample frequency plus the six accel/gyro columns from an
    env_movements data file.
    """
    columns = ([], [], [], [], [], [])
    with open("../env_movements/" + filename + ".txt") as data:
        freq = scanf("%f", data.readline())
        for line in data:
            # time[ms], ax, ay, az, gx, gy, gz — drop the leading timestamp.
            parsed = scanf("%f %f %f %f %f %f %f", line)
            for dest, value in zip(columns, parsed[1:]):
                dest.append(value)
    ax, ay, az, gx, gy, gz = columns
    return freq, ax, ay, az, gx, gy, gz
def parse_line(self, str):
    """Parse one text line with this object's scanf format string and,
    when it matches, hand the extracted tuple to self.append_data.

    NOTE(review): the parameter name `str` shadows the builtin; renaming
    would change the keyword-call interface, so it is only flagged here.
    """
    # https://pypi.org/project/scanf/
    arr = scanf.scanf(self.format_str, str)
    # https://stackoverflow.com/a/7816439
    if arr is not None:
        self.append_data(arr)
def recombine(self, path=None):
    """Reassemble the per-amplifier NL coefficient files into one FITS cube.

    :param path: output directory for the combined "<uid>.nl.fits" file.
        NOTE(review): the None default would make join(path, ...) fail —
        callers appear to always pass a directory; confirm.
    """
    from os import listdir  # removed unused `from os import getcwd`
    from scanf import scanf
    ls = [
        join(self.tmpdir, f) for f in listdir(self.tmpdir)
        if any(['%s.amp' % self.uid in f, '%s.bias.amp' % self.uid in f])
    ]
    coeff = np.zeros((4, 4096, 4096))
    for f in ls:
        # File names look like "<uid>.amp.<n>" — take the amp number.
        amp = scanf("%s.amp.%d", basename(f))[1]
        print("Parsing amp #%.2d" % amp)
        # Each amp covers a 128-column stripe of the 4096-wide detector.
        coeff[:, :, (amp - 1) * 128:(amp) * 128] = fits.getdata(f)
    hdu = fits.PrimaryHDU()
    hdu.data = coeff
    from datetime import datetime
    time = datetime.now().strftime("%Y-%m-%dT%H:%M:%S")
    uid = datetime.now().strftime("%y%m%d%H%M%S")
    hdu.header['DATE'] = (time, "Creation date")
    hdu.header['UNIQUEID'] = (
        uid, "Unique identification number for this file.")
    hdu.header['RUID'] = (self.uid, "Built from this ramp uid.")
    hdu.writeto(join(path, '%s.nl.fits' % self.uid), overwrite=True)
def recombine(self):
    """Stitch the per-amplifier gain maps back into one full-frame cube
    and write it as "<uid1>x<uid2>.gain.fits".
    """
    from os import listdir
    from os.path import basename
    from scanf import scanf
    from datetime import datetime
    stub = '%sx%s.amp' % (self.uid1, self.uid2)
    files = [join(self.pathtmp, name)
             for name in listdir(self.pathtmp) if stub in name]
    _map = np.zeros((4, 4096, 4096))
    for fpath in files:
        # File names end in ".amp.<n>" — recover the amplifier number.
        amp = scanf("%s.amp.%d", basename(fpath))[1]
        print("Parsing amp #%.2d" % amp)
        lo, hi = (amp - 1) * 128, amp * 128
        _map[:, :, lo:hi] = fits.getdata(fpath)
    hdu = fits.PrimaryHDU()
    hdu.data = _map
    time = datetime.now().strftime("%Y-%m-%dT%H:%M:%S")
    uid = datetime.now().strftime("%y%m%d%H%M%S")
    hdu.header['DATE'] = (time, "Creation date")
    hdu.header['UNIQUEID'] = (
        uid, "Unique identification number for this file.")
    hdu.header['RUID1'] = (self.uid1, "Built from this ramp uid.")
    hdu.header['RUID2'] = (self.uid2, "Built from this ramp uid.")
    hdu.writeto(join(self.pathtmp,
                     '%sx%s.gain.fits' % (self.uid1, self.uid2)),
                overwrite=True)
def ajust(self, auto_ajust):
    """Load the six calibration coefficients into self.calib.

    When auto_ajust is truthy they are read from the device over the
    serial port; otherwise an identity calibration ([1] * 6) is used.
    """
    if auto_ajust:
        # NOTE(review): presumably 'p' asks the device to print its
        # calibration — confirm against the device protocol. Also confirm
        # the serial wrapper accepts str; py3 pyserial expects bytes.
        self.ser.write("p")
        time.sleep(0.1)  # give the device time to answer
        reply = self.ser.read(100)
        self.calib = scanf("%f,%f,%f,%f,%f,%f", reply)
    else:
        self.calib = [1] * 6
def GetGraphData(self, file_name, columns):
    """Read a TNTP-style network file into a dict of metadata plus a
    pandas DataFrame of the selected link columns.

    :param file_name: path to the network file.
    :param columns: column names to keep in 'graph_table'.
    :return: dict with 'nodes number', 'links number', 'zones number'
        and 'graph_table'.
    """
    graph_data = {}
    metadata = ''
    with open(file_name, 'r') as myfile:
        # Everything before the '~' header row is metadata; the header row
        # itself supplies the column names.
        for index, line in enumerate(myfile):
            if re.search(r'^~', line) is not None:
                skip_lines = index + 1
                headlist = re.findall(r'[\w]+', line)
                break
            else:
                metadata += line
    # NOTE(review): if no '~' line exists, skip_lines/headlist stay unbound
    # and the read_csv call below raises NameError.
    graph_data['nodes number'] = scanf('<NUMBER OF NODES> %d', metadata)[0]
    graph_data['links number'] = scanf('<NUMBER OF LINKS> %d', metadata)[0]
    graph_data['zones number'] = scanf('<NUMBER OF ZONES> %d', metadata)[0]
    first_thru_node = scanf('<FIRST THRU NODE> %d', metadata)[0]
    dtypes = {
        'init_node': np.int32,
        'term_node': np.int32,
        'capacity': np.float64,
        'length': np.float64,
        'free_flow_time': np.float64,
        'b': np.float64,
        'power': np.float64,
        'speed': np.float64,
        'toll': np.float64,
        'link_type': np.int32
    }
    df = pd.read_csv(file_name,
                     names=headlist,
                     dtype=dtypes,
                     skiprows=skip_lines,
                     sep=r'[\s;]+',
                     engine='python',
                     index_col=False)
    df = df[columns]
    # Flag endpoints at/after FIRST THRU NODE, next to their node columns.
    df.insert(loc=list(df).index('init_node') + 1,
              column='init_node_thru',
              value=(df['init_node'] >= first_thru_node))
    df.insert(loc=list(df).index('term_node') + 1,
              column='term_node_thru',
              value=(df['term_node'] >= first_thru_node))
    graph_data['graph_table'] = df
    return graph_data
def main(input_lines):
    """AoC 2020 day 14: bitmask memory emulation.

    Part 1 applies the mask to the written *value* (X bits pass through);
    part 2 applies it to the *address* (1 forces a bit, X floats over both
    0 and 1). Returns (part1, part2): the memory sums after each run.
    """
    # Part 1
    memory = {}
    for line in input_lines:
        instruction, value = line.strip().split(" = ")
        if instruction == "mask":
            mask = value
        else:
            address = scanf.scanf("mem[%s]", instruction)[0]
            # 36-bit, zero-padded binary digits of the value.
            masked_value = list(bin(int(value))[2:])
            masked_value = ["0"] * (36 - len(masked_value)) + masked_value
            for bi, mask_bit in enumerate(mask):
                if mask_bit == "0":
                    masked_value[bi] = "0"
                elif mask_bit == "1":
                    masked_value[bi] = "1"
            memory[address] = int("".join(masked_value), 2)
    part1 = sum(memory.values())
    # Part 2
    memory = {}
    for line in input_lines:
        instruction, value = line.strip().split(" = ")
        if instruction == "mask":
            mask = value
        else:
            address = scanf.scanf("mem[%s]", instruction)[0]
            masked_address = list(bin(int(address))[2:])
            masked_address_list = [["0"] * (36 - len(masked_address)) + masked_address]
            for bi, mask_bit in enumerate(mask):
                if mask_bit == "1":
                    for mi in range(len(masked_address_list)):
                        masked_address_list[mi][bi] = "1"
                elif mask_bit == "X":
                    # Fork every candidate address: one copy with this bit
                    # set, one cleared. The length is snapshotted on purpose
                    # because the loop appends while iterating.
                    current_len = len(masked_address_list)
                    for mi in range(current_len):
                        new_address = masked_address_list[mi].copy()
                        new_address[bi] = "1"
                        masked_address_list.append(new_address)
                        masked_address_list[mi][bi] = "0"
            for address in masked_address_list:
                memory[int("".join(address), 2)] = int(value)
    part2 = sum(memory.values())
    return part1, part2
def handle_ts_data(topic, msg):
    """Extract (timestamp, sensor id) from a 'Loc raw' message.

    Returns (-1, -1) when the topic is not a raw-location topic.
    """
    if 'Loc raw' not in str(topic):
        return -1, -1
    # Message layout: x y z heading gamma theta ts bsIdx sensorId
    fields = scanf.scanf("%f %f %f %f %f %f %d %d %d", str(msg))
    return fields[6], fields[8]
def read_frompath(filename):
    """Read the sample frequency and the six accel/gyro columns from an
    IMU log file given by an explicit path.
    """
    names = ("ax", "ay", "az", "gx", "gy", "gz")
    series = {name: [] for name in names}
    with open(filename) as data:
        freq = scanf("%f", data.readline())
        for line in data:
            # time[ms], ax, ay, az, gx, gy, gz — drop the timestamp.
            row = scanf("%f %f %f %f %f %f %f", line)
            for name, value in zip(names, row[1:]):
                series[name].append(value)
    return (freq, series["ax"], series["ay"], series["az"],
            series["gx"], series["gy"], series["gz"])
def geraDadosBarras(data, pontos):
    """Build bar-chart series from load-test traces.

    For each test point the excitation frequency is read from its
    Forca.dat file; each trace in `data` is then scanned for unstable
    stretches (rows whose retry counter, column 2, exceeds the retry
    limit), recording the frequency, the load duration and the initial
    load of every stretch found.

    NOTE(review): `carga_inicial`/`carga_inicial_temp` can be referenced
    before assignment when a trace starts inside an unstable stretch —
    confirm input traces always begin with a stable row.
    """
    num_max_tentativas = 20
    num_max_tentativas = num_max_tentativas - 1
    data_freq = []
    for i in pontos:
        # Read Forca.dat for this point to extract the frequency.
        arquivo = open("..\\ponto %s\\Forca.dat" % (i), "r")
        arq_lines = arquivo.readlines()
        # Parse the data with scanf (frequency lives on line 5).
        pattern = '%d %f \n'
        dados = scanf(pattern, arq_lines[4])
        data_freq.append(dados[1])
    data_plot_freq = []
    data_plot_duracao_carga = []
    data_plot_carga_inicial = []
    # Walk the traces looking for unstable stretches.
    contador = -1
    for data_i in data:
        contador = contador + 1
        trecho_estavel = True
        encontrou_trechoAnalise = False
        possivel_fim_trechoAnalise = False
        for data_linha in data_i:
            if data_linha[2] > 1 and encontrou_trechoAnalise == False:
                # Possible unstable stretch.
                if data_linha[2] > num_max_tentativas:
                    # Confirmed unstable stretch.
                    trecho_estavel = False
                    encontrou_trechoAnalise = True
            elif data_linha[2] < 2:
                if encontrou_trechoAnalise == False:
                    carga_inicial = data_linha[1]
                elif encontrou_trechoAnalise == True and possivel_fim_trechoAnalise == False:
                    # Possible end of the unstable stretch under analysis.
                    possivel_fim_trechoAnalise = True
                    carga_inicial_temp = data_linha[1]
                else:
                    trecho_estavel = True
                    possivel_fim_trechoAnalise = False
            elif possivel_fim_trechoAnalise == True:
                possivel_fim_trechoAnalise = False
            if trecho_estavel == True and encontrou_trechoAnalise == True:
                # Stretch closed: record it and roll the initial load over.
                encontrou_trechoAnalise = False
                trecho_estavel = True
                data_plot_freq.append(data_freq[contador])
                data_plot_duracao_carga.append(carga_inicial_temp - carga_inicial)
                data_plot_carga_inicial.append(carga_inicial)
                carga_inicial = carga_inicial_temp
        if encontrou_trechoAnalise == True:
            # Trace ended while still unstable: close the stretch at the
            # last row seen.
            data_plot_freq.append(data_freq[contador])
            data_plot_duracao_carga.append(data_linha[1] - carga_inicial)
            data_plot_carga_inicial.append(carga_inicial)
    return data_plot_freq, data_plot_duracao_carga, data_plot_carga_inicial
def convertGPSTextToList(self, gpsData, timeZone):
    """Convert GPX-style track lines into
    [lat, lon, geoidheight, date, time, uS, timeAbsolute] records.

    :param gpsData: iterable of lines holding lat="..."/lon="..."/<time>
        (and optionally <geoidheight>) fragments.
    :param timeZone: offset in hours added to the epoch timestamp.
    """
    print('converting gps text to list')
    generatedData = []
    geoidheight = None  # last seen value is carried forward between lines
    for line in gpsData:
        sLine = line.split(' ')
        lat = None
        lon = None
        dateL = None
        timeL = None
        uS = None  # NOTE(review): never assigned — placeholder column?
        timeAbsolute = None
        lat = scanf('lat="%f"', sLine[1])[0]
        lon = scanf('lon="%f">%s', sLine[2])[0]
        # Date and clock-time pulled from the <time>...T...Z fragment.
        dateL = scanf('%s<time>%sT%s', sLine[2])[1]
        timeL = scanf('%s<time>%sT%sZ%s', sLine[2])[2]
        if (sLine[2].__contains__('<geoidheight>')):
            geoidheight = scanf('%s<geoidheight>%f</geoidheight>%s', sLine[2])[1]
        else:
            if(geoidheight is None):
                geoidheight = 0.0
        dateS = scanf('%d-%d-%d', dateL)
        timeS = scanf('%d:%d:%d.%d', timeL)
        # NOTE(review): timeS[3] (the fractional-second digits) is passed
        # straight through as microseconds with no scaling — e.g. ".5"
        # becomes 5 µs, not 500000 µs. Confirm the source precision.
        timeS = datetime.datetime(dateS[0], dateS[1], dateS[2], timeS[0], timeS[1], timeS[2], timeS[3])
        timeAbsolute = time.mktime(timeS.timetuple()) + timeZone * 60 * 60
        lLine = [lat,lon,geoidheight,dateL ,timeL, uS, timeAbsolute]
        generatedData.append(lLine)
    print('GPS data dates from: ' + str(generatedData[0][6]) + ' to: ' + str(generatedData[-1][6]))
    return generatedData
def read_array(text_headle, FILE_DATA, array_length=6):
    """Extract `array_length` floats that follow a header regex in a file's
    text.

    :param text_headle: regex locating the header that precedes the numbers.
    :param FILE_DATA: full file text to search.
    :param array_length: number of floats to read (default 6).
    :return: np.ndarray of the parsed floats, or an all-NaN array when the
        header is absent.
    """
    t_match = re.search(text_headle, FILE_DATA)
    if t_match is None:
        # Header not found: signal "missing" with NaNs rather than raising.
        # (np.full replaces the original np.ones + NaN-fill two-step.)
        return np.full(array_length, np.nan)
    start = t_match.end()
    myformat = ('%f ' * array_length)[:-1]
    return np.array(scanf(myformat, FILE_DATA[start:]))
def encontrarPadrao2(arquivo):
    """Match the path segments of `arquivo` against the expected layout and
    register the measurement in the database.

    Patterns, in segment order: "%d-%d-%d" (date), "L%s" (lote),
    "M%s" (medidor), "%s_%d_%d" (comment + frequency range).
    """
    caminho = list(re.split('/', arquivo))[1:]
    padrao = ["%d-%d-%d", "L%s", "M%s", "%s_%d_%d"]
    data = None
    medidor = None
    lote = None
    fMinimo = None
    fMaximo = None
    periodo = None
    for subcaminho in caminho:
        for contador, subpadrao in enumerate(padrao):
            # Parse once per candidate pattern (was parsed twice per hit).
            dados = scanf(subpadrao, subcaminho)
            if not dados:
                continue
            if contador == 0:
                data = datetime.date(dados[2], dados[1], dados[0])
            if contador == 1:
                lote = dados[0]
            if contador == 2:
                # BUG FIX: "M%s" yields a single value; the old code then
                # overwrote with dados[1], raising IndexError on every match.
                medidor = dados[0]
            if contador == 3:
                comentario = dados[0]
                fMinimo = dados[1]
                fMaximo = dados[2]
    # NOTE(review): nothing here ever assigns `periodo`, so this guard
    # currently always returns early — confirm where periodo should come
    # from (encontrarPadrao derives it from a "%s" path segment).
    if (data is None or medidor is None or lote is None
            or fMinimo is None or fMaximo is None or periodo is None):
        return
    try:
        medidor = Medidor.objects.get(medidor="L%s-M%s" % (lote, medidor))
        MedidaEletromagnetica.objects.update_or_create(
            medidor=medidor, data=data, dado=arquivo, fMinima=fMinimo,
            fMaxima=fMaximo, comentarios=comentario, periodo=periodo)
    except Exception:
        # Best-effort: report and continue on any DB error (narrowed from
        # a bare except).
        print("erro ao adicionar L%s-M%s" % (lote, medidor))
def ReadAnswer(self, filename):
    """Parse per-link flow/time pairs from an answer file.

    The first line is a header and is skipped; each remaining line holds
    "<int> <int> <flow> <time>".

    :return: {'flows': [...], 'times': [...]}
    """
    flows = []
    times = []
    with open(filename) as myfile:
        myfile.readline()  # drop the header line
        for line in myfile:
            _, _, flow, time = scanf('%d %d %f %f', line)
            flows.append(flow)
            times.append(time)
    return {'flows': flows, 'times': times}
def handle_pos_data(topic, msg):
    """Return (x, y) from a 'robot filtered' location message.

    Returns (-1, -1) for any other topic.
    """
    if 'robot filtered' not in str(topic):
        return -1, -1
    # Message layout: x y z heading gamma theta ts bsIdx sensorId
    parsed = scanf.scanf("%f %f %f %f %f %f %d %d %d", str(msg))
    return parsed[0], parsed[1]
def main(input_lines):
    """AoC 2020 day 2: count passwords valid under both policy readings."""
    count_valid = 0
    position_valid = 0
    for line in input_lines:
        low, high, letter, password = scanf.scanf("%d-%d %c: %s", line)
        # Policy 1: occurrences of `letter` must fall within [low, high].
        occurrences = password.count(letter)
        if low <= occurrences <= high:
            count_valid += 1
        # Policy 2: exactly one of the two 1-based positions holds `letter`.
        at_low = password[low - 1] == letter
        at_high = password[high - 1] == letter
        if at_low != at_high:
            position_valid += 1
    return count_valid, position_valid
def convertRadiationTextToList(self, radiationData):
    """Convert CSV radiation-log lines into
    [timeAbsolute, dateS, timeS, howOften, uS, rest] records.
    """
    print('converting radiation text to list')
    generatedData = []
    for line in radiationData:
        splittedLine = line.split(',')
        dateL = None
        timeL = None
        timeAbsolute = None
        # Column 0 holds "YYYY-MM-DD HH:MM".
        datatimeSplitted = splittedLine[0].split(' ')
        dateS = scanf('%d-%d-%d', datatimeSplitted[0])
        timeS = scanf('%d:%d', datatimeSplitted[1])
        # Seconds fixed at 30 — presumably to stamp the middle of the
        # minute bucket; confirm.
        timeL = datetime.datetime(dateS[0], dateS[1], dateS[2], timeS[0], timeS[1], 30, 0)
        timeAbsolute = time.mktime(timeL.timetuple())
        uS = str(splittedLine[2])
        if('.' in uS):
            # Value already expressed as a decimal reading.
            uS = float(uS)
        else:
            # NOTE(review): integer readings look like raw counts;
            # 6.49956E-09 * 1e6 appears to be a counts-to-µSv conversion
            # factor — confirm against the sensor's datasheet.
            uS = int(uS) * 6.49956E-09 * 1000000
        howOften = splittedLine[1]
        lLine = [timeAbsolute, dateS, timeS, howOften, uS, splittedLine[3:]]
        generatedData.append(lLine)
    print('Radiation data dates from: ' + str(generatedData[0][0]) + ' to: ' + str(generatedData[-1][0]))
    return generatedData
def master_map(self):
    """Build master_gain.fits as the per-pixel median of every pairwise
    gain map found in pathtmp, then save a gain image and histogram.
    """
    from scanf import scanf
    from os import listdir
    from os.path import basename
    ls = [
        join(self.pathtmp, f) for f in listdir(self.pathtmp)
        if ".gain.fits" in f
    ]
    # File names are "<uid1>x<uid2>.gain.fits" — recover the ramp pairs.
    uid_pairs = [scanf("%sx%s.gain.fits", basename(f)) for f in ls]
    print("Will use uid pair:")
    for uid in uid_pairs:
        print("%s X %s" % (uid[0], uid[1]))
    # Median across all maps; [0] takes the first plane of each cube.
    cube = [fits.getdata(f)[0] for f in ls]
    gg = np.median(cube, axis=0)
    hdu = fits.PrimaryHDU(data=gg)
    from datetime import datetime
    time = datetime.now().strftime("%Y-%m-%dT%H:%M:%S")
    uid = datetime.now().strftime("%y%m%d%H%M%S")
    hdu.header['DATE'] = (time, "Creation date")
    hdu.header['UNIQUEID'] = (
        uid, "Unique identification number for this file.")
    hdu.header.add_comment("Built using:")
    for uid in uid_pairs:
        hdu.header.add_comment("%s,%s" % (uid[0], uid[1]))
    hdu.writeto(join(self.path, "master_gain.fits"))
    print("master_gain.fits created")
    # Full-frame gain image. NOTE(review): `sc` is presumably a
    # sigma-clipped statistics helper returning (mean, median, std) —
    # confirm which module provides it.
    fig, ax = plt.subplots()
    im = ax.imshow(gg, cmap='YlGn', vmin=.7, vmax=1.9)
    plt.colorbar(im)
    ax.xaxis.set_visible(False)
    ax.yaxis.set_visible(False)
    g, _, g_err = sc(gg)
    ax.set(title='Gain %2.2f+/-%2.2f adu/e$^-$' % (g, g_err))
    plt.tight_layout()
    fig.savefig(join(self.path, 'gain.png'))
    # Histogram of the clipped gain distribution.
    sns.set_theme()
    f1, ax1 = plt.subplots()
    gg_flat = gg.ravel()
    gg_flat = gg_flat[(gg_flat > .9) & (gg_flat < 1.9)]
    g, _, g_err = sc(gg_flat)
    ax1.hist(gg_flat, bins=50)
    ax1.set(title='Median, %.2f+/-%.2f' % (g, g_err),
            xlabel='Gain',
            ylabel='Occurance')
    plt.tight_layout()
    f1.savefig(join(self.path, 'gain.dist.png'))
    plt.show()
def _parse(line: str) -> AddrCommand:
    """Parse one command line into an AddrCommand.

    :param line: text line matching PATTERN_SCANF
        (id, command, addr, size, value).
    :return: populated AddrCommand.
    :raises Exception: re-raises whatever scanf/unpacking raised on a
        non-matching line. BUG FIX: the previous ``finally: return struct``
        silently swallowed this re-raise and handed back an empty struct.
    """
    struct = AddrCommand()
    try:
        _id, command, addr, size, value = scanf(PATTERN_SCANF, line)
    except Exception as e:
        logging.error("parse - %s - line : %s", e, line)
        raise
    struct._id = _id
    struct.command = command
    struct.addr = addr
    struct.size = size
    struct.value = value
    return struct
def min_max_indices_2d(varName, inputFilename):
    """Scan a file for "varName(i,j)" references and return the extremes of
    each index.

    :return: (min_i, min_j, max_i, max_j)
    :raises ValueError: when no parsable reference exists (min()/max() of
        empty lists).
    """
    varName = varName.lower()
    index_1 = []
    index_2 = []
    with open(inputFilename, 'r') as f:
        for line in f:
            line3 = line.strip().lower()
            find_index = line3.find(varName + '(')
            # Line contains the desired varName.
            if find_index > -1:
                # BUG FIX: the offset was computed on the stripped/lowered
                # string but applied to the raw line, so any leading
                # whitespace misaligned the slice. Slice line3 instead
                # (already lower-cased, so the extra .lower() goes too).
                out = scanf(varName + "(%d,%d)", line3[find_index:])
                # scanf returns None when the indices are not plain
                # integers (e.g. "name(i,j)"); skip instead of crashing.
                if out is not None:
                    index_1.append(out[0])
                    index_2.append(out[1])
    return min(index_1), min(index_2), max(index_1), max(index_2)
def GetGraphCorrespondences(self, file_name):
    """Parse an origin/destination trips file.

    :return: ({origin: {'targets': [...], 'corrs': [...]}}, total_od_flow)
    """
    with open(file_name, 'r') as myfile:
        trips_data = myfile.read()
    total_od_flow = scanf('<TOTAL OD FLOW> %f', trips_data)[0]
    graph_correspondences = {}
    # Each origin block starts with "Origin <n>" followed by
    # "<target> : <flow>;" entries.
    for data in re.findall(r'Origin[\s\d.:;]+', trips_data):
        origin_index = scanf('Origin %d', data)[0]
        targets, corrs_vals = [], []
        for line in re.findall(r'[\d]+\s+:[\d.\s]+;', data):
            target, corrs = scanf('%d : %f', line)
            targets.append(target)
            corrs_vals.append(corrs)
        graph_correspondences[origin_index] = {'targets': targets,
                                               'corrs': corrs_vals}
    return graph_correspondences, total_od_flow
def recv_optic_loc(self):
    """Drain queued optic-location messages and return the newest
    (x, y, radian); each element is None when nothing was received.
    """
    x = None
    y = None
    radian = None
    # Keep the poll timeout short (20 ms): location messages arrive at a
    # high rate, so a long timeout would keep poll() true and trap us here.
    while self.recv_socket.poll(20):
        try:
            topic, msg = self.recv_socket.recv_multipart()
            if msg is not None:
                x, y, z, radian, gamma, theta, ts, bs_index, sensor_id \
                    = scanf.scanf("%f %f %f %f %f %f %f %d %d", msg.decode())
        except zmq.error.Again as err:
            # BUG FIX: print() was given printf-style arguments
            # ('Exception:%s', err) and printed a tuple; interpolate.
            print('Exception:%s' % err)
    # if x is not None and y is not None:
    #     x = x / 100.00
    #     y = y / 100.00
    return x, y, radian
def preprocess_timestamp(timestamp_with_timezone_offset):
    """Convert an access-log timestamp to "%Y-%m-%d T %H:%M:%S.mmm" form.

    Only the hour is carried into the result; minutes and seconds are
    zeroed by construction. NOTE(review): presumably intentional
    hour-bucketing — confirm with the caller.
    """
    # Drop the trailing timezone offset ("dd/Mon/yyyy:HH:MM:SS +0000").
    raw = timestamp_with_timezone_offset.split(' ')[0]
    day, month, year, hour, minute, second = scanf(
        "%d/%s/%d:%d:%d:%d", raw)
    timestamp = datetime(year, convert_month_str_to_int(month), day, hour)
    # strftime yields 6 microsecond digits; trim to 3 (milliseconds).
    return timestamp.strftime("%Y-%m-%d T %H:%M:%S.%f")[:-3]
def publish_loc_data(self, topic, msg):
    """Publish location data to the controller.

    Decodes a whitespace-separated location message, wraps it in a
    protobuf Location and sends it on the location PUB socket;
    reconnects to the controller when the send would block.
    """
    if topic not in str_loc_type or msg is None:
        print('str_loc_type not in str_loc_type or msg is None:', topic, msg)
        return
    try:
        # Message layout: x y z heading gamma theta ts bsIdx sensorId.
        x, y, z, heading, gamma, theta, ts, bs_index, sensor_id \
            = scanf.scanf("%f %f %f %f %f %f %f %d %d", msg.decode())
        location = message_pb2.Location()
        location.type = str_loc_type[topic]
        location.coordinates.x = x
        location.coordinates.y = y
        location.coordinates.z = z
        location.heading = heading
        location.gamma = gamma
        location.theta = theta
        location.ts = ts
        location.bs_index = bs_index
        location.sensor_id = sensor_id
        location.sensor_channel_id = 0
        location.terminal_id = global_local_mac_addr
        # print('publish_loc_data:', location)
        msg_topic = b'Location'
        msg_content = location.SerializeToString()
        try:
            # Wait up to 500 ms for the socket to become writable.
            if self._socket_pub_loc.poll(500, zmq.POLLOUT):
                self._socket_pub_loc.send_multipart(
                    [msg_topic, msg_content])
        except zmq.error.Again as err:
            MLog.mlogger.warn('Exception:%s', err)
            MLog.mlogger.warn(traceback.format_exc())
            # Send kept failing: assume the controller link is dead.
            self.reconnect_controller()
    except Exception as err:
        # Covers malformed messages (scanf -> None unpack) and decode errors.
        MLog.mlogger.warn('Exception err: %s,%s', err, msg.decode())
    return
sys.exit(f"Usage: {sys.argv[0]} BASE_SHAPE_ID") figfile = sys.argv[0].replace( '.py', f'-{BASE_SHAPE_ID}.pdf') # Choose which base shape according to command-line argument and parse # out the shape parameters if any if BASE_SHAPE_ID == "paraboloid": base_shape = bp.paraboloid_R_theta shape_label = "Paraboloid" elif BASE_SHAPE_ID == "wilkinoid": base_shape = bp.wilkinoid_R_theta shape_label = "Wilkinoid" elif BASE_SHAPE_ID.startswith("cantoid"): ibeta, = scanf.scanf("cantoid-beta%d", BASE_SHAPE_ID) beta = ibeta / 100000 base_shape = bp.Spline_R_theta_from_function( ngrid=1000, shape_func=bp.cantoid_R_theta, shape_func_pars=(beta,)) shape_label = rf"Cantoid $\beta = {beta}$" elif BASE_SHAPE_ID.startswith("ancantoid"): ixi, ibeta = scanf.scanf("ancantoid-xi%d-beta%d", BASE_SHAPE_ID) xi, beta = ixi / 100, ibeta / 100000 base_shape = ancantoid_shape.Ancantoid(xi=xi, beta=beta, n=301) shape_label = rf"Ancantoid $\xi = {xi:.1f}$, $\beta = {beta}$" elif BASE_SHAPE_ID.startswith("dragoid"): ialpha, = scanf.scanf("dragoid-alpha%d", BASE_SHAPE_ID) alpha = ialpha / 100 base_shape = dragoid_shape.Dragoid(alpha=alpha) shape_label = rf"Dragoid $\alpha_\mathrm{{drag}} = {alpha:.2f}$"