import datetime
import json
import os

import boto3


def endpoint(event, context):
    sqs = boto3.resource('sqs')
    queue = sqs.get_queue_by_name(QueueName=str(os.environ['SQS']))
    s3 = boto3.resource("s3")
    bucket = s3.Bucket(str(os.environ['S3']))
    body = []
    proxy = {'https': str(os.environ['PROXY'])}
    for message in queue.receive_messages(MaxNumberOfMessages=2):
        try:
            info = get_info(url=message.body, proxy=proxy)
            info["url"] = message.body
            s3_body = json.dumps(info)
            # S3 keys cannot contain the URL's slashes, so swap them for pipes.
            key = f"{message.body}|{datetime.datetime.utcnow()}.json".replace("/", "|")
            bucket.put_object(Key=key, Body=s3_body)
        except Exception as e:
            body.append({
                "url": message.body,
                "state": False,
                "error": repr(e)
            })
        else:
            body.append({"url": message.body, "state": True})
        finally:
            message.delete()
    response = {"statusCode": 200, "body": body}
    return response
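# A minimal local smoke test for the handler above; a sketch, not part of the
# original service. The environment-variable values are hypothetical
# placeholders, and get_info must be importable for the call to succeed.
import os

os.environ.setdefault('SQS', 'scrape-queue')             # hypothetical queue name
os.environ.setdefault('S3', 'scrape-results')            # hypothetical bucket name
os.environ.setdefault('PROXY', 'http://localhost:8080')  # hypothetical proxy

# Lambda supplies an event and a context; this handler uses neither.
print(endpoint(event={}, context=None))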
def get_url():
    """
    Extract the content of the link.
    :return:
    """
    mode = request.args.get('mode')
    likenum = int(request.args.get('num') or 0)  # default to 0 when 'num' is missing
    if not mode:  # testing mode
        print("Testing mode")
        url = 'https://mp.weixin.qq.com/s/g-xI1RkU7UUC8pQsDCZD_A'
        uid = '0'
        usrname = '葛蒙蒙'
    else:  # regular mode
        print("Regular mode")
        uid = request.args.get('uid')
        url = request.args.get('url')
        usrname = request.args.get('name')
    print("Getting info as:----------\nurl:%s\nuid:%s\nusername:%s\n------------------"
          % (url, uid, usrname))
    info = get_info(url)  # extract the info from the link and download it locally
    if info['code'] == 1:  # extraction succeeded
        title = info['title']
        pics_joint(title, uid, usrname, likenum)  # generate the "likes" page
        rep = {'code': 1, 'content': url}
    else:  # extraction failed: return the error info as-is
        rep = info
    return jsonify(rep)
def html_to_json(filename):
    with open(filename, 'rb') as fp:
        html_doc = fp.read()
    category, name = os.path.basename(filename).split('.')[0].split('_', 1)
    data = get_info(category, html_doc)
    uid = '{}_{}'.format(category, name)
    for key, val in data.items():  # iteritems() is Python 2 only
        with open(os.path.join(JSON_DIR, '{}_{}.json'.format(uid, key)), 'w') as fp:
            json.dump(val, fp)
def get_T2TSE_info(prostate_path):
    '''
    Extract the necessary information about the T2TSE files of all the
    patients.

    Parameters
    ----------
    prostate_path : String
        Path to the folder containing all the patient files.

    Returns
    -------
    image_info_df_merged : DataFrame
        DataFrame containing the necessary information about the T2TSE files
        of all the patients.
    '''
    prostate_list = os.listdir(prostate_path)
    image_info_df_list = []
    # The prefix lookup depends only on prostate_path, so compute it once
    # instead of inside the inner loop.
    prefix = pickT2TSEFile.get_T2LO_prefix(prostate_path)
    for patient in prostate_list:
        patient_path = os.path.join(prostate_path, patient)
        if os.path.isdir(patient_path):
            patient_images_path = os.path.join(patient_path, "1")
            patient_images_list = os.listdir(patient_images_path)
            for patient_image in patient_images_list:
                if prefix[patient] in patient_image:
                    filtered_0 = patient_image
                    if "T2TSE" in filtered_0:
                        filtered_1 = filtered_0
                        image_path = os.path.join(patient_images_path, filtered_1)
                        image_info = get_info(image_path)
                        print("Processing: ", image_path)
                        image_info_df = pd.DataFrame(
                            {
                                "Patient": patient,
                                "File_name": filtered_1,
                                "Origin_X": image_info['Origin'][0],
                                "Origin_Y": image_info['Origin'][1],
                                "Origin_Z": image_info['Origin'][2],
                                "Size_X": image_info['Size'][0],
                                "Size_Y": image_info['Size'][1],
                                "Size_Z": image_info['Size'][2],
                                "Spacing_X": image_info['Spacing'][0],
                                "Spacing_Y": image_info['Spacing'][1],
                                "Spacing_Z": image_info['Spacing'][2],
                            },
                            index=[0])
                        image_info_df_list.append(image_info_df)
    image_info_df_merged = pd.concat(image_info_df_list)
    image_info_df_merged.index = range(len(image_info_df_merged))
    return image_info_df_merged
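# Hypothetical usage of get_T2TSE_info; a sketch in which the data folder and
# the CSV name are placeholders, not paths from the original project.
t2tse_info = get_T2TSE_info(r"C:\data\prostate")
t2tse_info.to_csv("T2TSE_info.csv", index=False)
print(t2tse_info.head())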
def findMovies(userid):
    usersets = Storer('userdata.txt').get_usersets(str(userid))
    afisha = Storer('database.txt').get_data('afisha')
    afisha_movies = get_info(usersets, afisha)
    if afisha_movies:
        for movie in afisha_movies:
            bot.send_message(userid, movie)
    else:
        bot.send_message(userid, "No suitable movies in the current repertoire")
        bot.send_message(userid, "Searching among this month's premieres: ")
        time.sleep(1)
        premieres = Storer('database.txt').get_data('premieres')
        premieres_movies = get_info(usersets, premieres)
        if premieres_movies:  # was `if premieres:`, which ignored the filtered result
            for movie in premieres_movies:
                bot.send_message(userid, movie)
        else:
            bot.send_message(userid, "Alas, no suitable movies among the premieres either")
    bot.send_message(userid, 'Done', reply_markup=markups.start_markup)
def main():
    ts = time()
    info_list = get_info.get_info()
    yy = info_list[0]
    mm = info_list[1]
    c_list = info_list[2]
    tablespath = "/var/www/html/tables/"
    kill_old_tables.kill_old_tables(c_list, tablespath)
    # Crawl one timetable per class name concurrently.
    jobs = [gevent.spawn(crawler.gettable, yy, mm, classname)
            for classname in c_list]
    gevent.wait(jobs)
    get_index.get_index(tablespath)
    print('Took {}s'.format(time() - ts))
def respond_url_query(url):
    info = get_info.get_info(url)
    print("retrieving")
    data = info[1].to_frame().T
    property_name = info[0]
    analytical_data = convert_raw_data.get_AnalyticalData(data)
    print("converting to analytical_data")
    text_to_render = convert_to_json.convert_to_json(analytical_data,
                                                     predictor.get_pred,
                                                     property_name)
    print("converting to json")
    return render_template('visualization.html',
                           title=property_name,
                           message="The following graphs will be displayed",
                           data=text_to_render)
def monitor_price(number_list, flag):
    for i, (number, remind_price, orientation) in enumerate(number_list):
        info, current_price = a.get_info(number)
        # orientation 1: alert when the price rises above the reminder price;
        # orientation 0: alert when it falls below.
        if current_price > remind_price and orientation == 1:
            if not flag[i]:
                b.sent_email(info)
                flag[i] = True
        elif current_price < remind_price and orientation == 0:
            if not flag[i]:
                b.sent_email(info)
                flag[i] = True
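# A hypothetical watch list illustrating the expected shapes (a sketch): each
# entry is (item_number, reminder_price, orientation), and `flag` starts
# all-False so each alert fires at most once.
watch_list = [
    ("123456", 99.0, 1),   # alert once the price rises above 99.0
    ("654321", 49.5, 0),   # alert once the price falls below 49.5
]
sent_flags = [False] * len(watch_list)
monitor_price(watch_list, sent_flags)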
def main():
    full_outp = ""
    # classes = get_info()
    classes = None
    with open("/home/steelcowboy/pass_html/pass-2018521-1545.html", "r") as ihtml:
        classes = get_info(ihtml)

    quarter = classes["quarter"]
    classes = classes["classes"]

    full_outp += f"\033[31m{quarter}\033[0m\n\n"
    for cls in classes:
        full_outp += print_class(cls)

    print(max_room_len)
    return full_outp
def index():
    if request.method == 'POST':
        id = request.form["id"]
        try:
            mapped_id = mapping[id]
            if mapped_id not in data['user_id'].values:
                return render_template('home.html')
            else:
                segment, n_purchase, clv = get_info(data, mapped_id)
                fig_data, sp_trans_list = get_customer_journey(transactions, bgf, mapped_id)
                return render_template('home.html',
                                       id=id,
                                       segment=segment,
                                       n_purchase=n_purchase,
                                       clv=clv,
                                       fig_data=fig_data,
                                       history=sp_trans_list)
        except Exception:  # unknown id or failed lookup: fall back to the empty page
            return render_template('home.html')
    return render_template('home.html')
def getBasicInfo(path, keyword1='new', keyword2='.nii'):
    '''
    Extracts the basic information about a particular type of file of all
    the patients.

    Parameters
    ----------
    path : String
        Path to the folder containing all the case folders. Each case folder
        contains the patients' image files.
    keyword1 : String, optional
        Filter to look for file(s) whose name contains the given string.
        The default is 'new'.
    keyword2 : String, optional
        Filter to look for file(s) whose name contains the given string.
        The default is '.nii'.

    Returns
    -------
    image_info_df_merged : DataFrame
        DataFrame containing basic information about a particular type of
        file of all the patients.
    '''
    cases = os.listdir(path)
    image_info_df_list = []
    for case in cases:
        case_path = os.path.join(path, case)
        if os.path.isdir(case_path):
            patients = os.listdir(case_path)
            for patient in patients:
                patient_path = os.path.join(case_path, patient)
                patient_dir_content = os.listdir(patient_path)
                for content in patient_dir_content:
                    if keyword1 in content:
                        filtered_0 = content
                        if keyword2 in filtered_0:
                            filtered_1 = filtered_0
                            image_path = os.path.join(patient_path, filtered_1)
                            image_info = get_info(image_path)
                            sitk_image = sitk.ReadImage(image_path)
                            data = sitk.GetArrayFromImage(sitk_image)
                            print("Patient", patient, "of", case, "is being processed.")
                            image_info_df = pd.DataFrame(
                                {
                                    "Case": case,
                                    "Patient": patient,
                                    "File_name": filtered_1,
                                    "Origin_X": image_info['Origin'][0],
                                    "Origin_Y": image_info['Origin'][1],
                                    "Origin_Z": image_info['Origin'][2],
                                    "Size_X": image_info['Size'][0],
                                    "Size_Y": image_info['Size'][1],
                                    "Size_Z": image_info['Size'][2],
                                    "Spacing_X": image_info['Spacing'][0],
                                    "Spacing_Y": image_info['Spacing'][1],
                                    "Spacing_Z": image_info['Spacing'][2],
                                    "Image Dimension (sitk)": sitk_image.GetDimension(),
                                    "Image np Array Shape": [data.shape],
                                },
                                index=[0])
                            image_info_df_list.append(image_info_df)
    image_info_df_merged = pd.concat(image_info_df_list)
    image_info_df_merged.index = range(len(image_info_df_merged))
    return image_info_df_merged
niere_path = r"C:\Users\michaely\Documents\hiwi\DeepMedic Stuff\Convert to nifti 20052020\3DSlicer_Niere" niere_list = os.listdir(niere_path) for file in niere_list: folder_path = os.path.join(niere_path, file) if os.path.isdir(folder_path): cases_dir = os.listdir(folder_path) for patient in cases_dir: patient_path = os.path.join(folder_path, patient) patient_dir = os.listdir(patient_path) for file in patient_dir: if "new_seg" in file: segmentation_path = os.path.join(patient_path, file) print("Processing:\n", segmentation_path) resample(image_path=segmentation_path, out_image_path=patient_path, segmentation_or_image="segmentation", is_label=True) print("\nResampled segmentation saved in:", patient_path, "\n") get_info( r"C:\Users\michaely\Documents\hiwi\DeepMedic Stuff\Convert to nifti 20052020\3DSlicer_Niere\Phlebolithen\P096\new_segmentation.nii" ) get_info( r"C:\Users\michaely\Documents\hiwi\DeepMedic Stuff\Convert to nifti 20052020\3DSlicer_Niere\Phlebolithen\P096\resampled_segmentation.nii" )
def the_equalizer():
    os.system("clear")
    print("Enter information for Nessus Server:")
    username = raw_input("\nUsername: ")  # "VisProj"
    password = getpass.getpass()  # "Password1234"
    port = raw_input("Port the server is running on: ")  # "8834"
    create_token(username, password, port)
    token = get_token()
    os.system("clear")
    printer.banner()
    while 1:
        printer.print_main_menu()
        option = raw_input(printer.header)
        if option == 'c' or option == 'C':
            os.system("clear")
            printer.banner()
        elif option == 'e' or option == 'E':
            os.system("rm -rf token.txt")
            os.system("rm -rf report.txt")
            sys.exit()
        elif option == '1':
            get_info(token, port, '2')
            option = raw_input("\n(press q to quit): ")
            while option != 'q':
                option = raw_input("\n(press q to quit): ")
            the_equalizer()
        elif option == '2':
            report_id = raw_input(printer.report_id)
            get_report(token, port, report_id, '2')
            option = raw_input("\n(press q to quit): ")
            while option != 'q':
                option = raw_input("\n(press q to quit): ")
            the_equalizer()
        elif option == '3':
            report_id = raw_input(printer.report_id)
            get_report(token, port, report_id, '3')
            ips = get_ips()
            vulns = get_total_vulnerabilities()
            table = BeautifulTable()
            table.column_headers = ("IP", "# of Vulnerabilities")
            print "\n"
            x = 0
            while x < len(ips):
                table.append_row([ips[x], vulns[x]])
                x += 1
            print table
            option = raw_input("\n(press q to quit): ")
            while option != 'q':
                option = raw_input("\n(press q to quit): ")
        elif option == '4':
            chart_menu(token, port)
            os.system("clear")
            printer.banner()
    os.system("rm -rf token.txt")
    os.system("rm -rf report.txt")
def get_ADC_info(prostate_path):
    '''
    Extract the necessary information about the ADC files of all the
    patients.

    Parameters
    ----------
    prostate_path : String
        Path to the folder containing all the patient files.

    Returns
    -------
    listed_merged : DataFrame
        DataFrame containing the necessary information about the ADC files
        of all the patients.
    '''
    prostate_list = os.listdir(prostate_path)
    image_info_df_list = []
    listed = []
    for patient in prostate_list:
        patient_path = os.path.join(prostate_path, patient)
        if os.path.isdir(patient_path):
            patient_images_path = os.path.join(patient_path, "1")
            patient_images_list = os.listdir(patient_images_path)
            for patient_image in patient_images_list:
                if "ADC" in patient_image:
                    check1 = patient_image
                    if "LO" in check1:
                        patient_image = check1
                        image_path = os.path.join(patient_images_path, patient_image)
                        image_info = get_info(image_path)
                        print("Processing: ", image_path)
                        reader = sitk.ImageFileReader()
                        reader.SetFileName(image_path)
                        image = reader.Execute()
                        data = sitk.GetArrayFromImage(image)
                        # tumor_label = data[:,:,:,0]
                        final_ROI = data
                        label_final_ROI = label(final_ROI)
                        props = regionprops(label_final_ROI)
                        for prop in props:
                            image_info_df = pd.DataFrame(
                                {
                                    "Patient": patient,
                                    "File_name": patient_image,
                                    "Origin_X": image_info['Origin'][0],
                                    "Origin_Y": image_info['Origin'][1],
                                    "Origin_Z": image_info['Origin'][2],
                                    "Size_X": image_info['Size'][0],
                                    "Size_Y": image_info['Size'][1],
                                    "Size_Z": image_info['Size'][2],
                                    "Spacing_X": image_info['Spacing'][0],
                                    "Spacing_Y": image_info['Spacing'][1],
                                    "Spacing_Z": image_info['Spacing'][2],
                                    "Label": prop.label,
                                    "Centroid": [prop.centroid],
                                    "Voxel Area": prop.area,
                                    "Voxel Volume": image_info['Spacing'][0]
                                    * image_info['Spacing'][1]
                                    * image_info['Spacing'][2],
                                    "Lesion Volume": image_info['Spacing'][0]
                                    * image_info['Spacing'][1]
                                    * image_info['Spacing'][2] * prop.area,
                                },
                                index=[0])
                            listed.append(image_info_df)
    listed_merged = pd.concat(listed)
    listed_merged.index = range(len(listed_merged))
    # image_info_df_merged = pd.concat(image_info_df_list)
    # image_info_df_merged.index = range(0, len(image_info_df_merged), 1)
    return listed_merged
    while True:
        lines = file_to_read.readline().replace('\n', '')
        if not lines:
            break
        res.append(lines)
    return res


############################## Main script ##############################
post_session = get_info.get_session()
g_id = input('Enter the crew ID (leave blank to read the member ids from menber_list.txt in the current directory): ')
if g_id == '':
    menber_list = read_menber_list('menber_list.txt')
else:
    menber_list = get_info.get_members(g_id, post_session)
    if len(menber_list) == 0:
        print("Error: the crew ID may be wrong, or the crew hides its member info")
        exit()
point_list = []
for i in range(len(menber_list)):
    point_list.append(get_info.get_info(str(menber_list[i]), post_session))
with open("point.json", 'w', encoding='utf-8') as json_file:
    json.dump(point_list, json_file, ensure_ascii=False)
deal.export_data(point_list)
from get_ids import get_ids
from get_info import get_info
from normalising_data import normalising_data
from cleaning_data import cleaning_data
from train import train

if __name__ == '__main__':
    get_ids()
    get_info()
    normalising_data()
    cleaning_data()
    train()
def run(self):  # overrides Thread.run
    # Frame-length state shared with get_datalength(); declared global so the
    # assignments below update the module-level values (was commented out).
    global g_code_length, g_header_length
    headers = {}
    self.handshaken = False
    while True:
        if self.handshaken == False:
            # Perform the WebSocket opening handshake first.
            if self.remote[0] == "127.0.0.1":
                while True:
                    self.conn.settimeout(1200)
                    info_tmp = bytes.decode(self.conn.recv(1024))
                    self.buffer += info_tmp
                    if not info_tmp:
                        break
            else:
                self.buffer = bytes.decode(self.conn.recv(104857600))
            info = self.buffer
            if self.buffer.find('\r\n\r\n') != -1:
                header, data = self.buffer.split('\r\n\r\n', 1)
                for line in header.split("\r\n")[1:]:
                    key, value = line.split(": ", 1)
                    headers[key] = value
                headers["Location"] = ("ws://%s%s" % (headers["Host"], self.path))
                key = headers['Sec-WebSocket-Key']
                token = b64encode(
                    hashlib.sha1(str.encode(str(key + self.GUID))).digest())
                handshake = "HTTP/1.1 101 Switching Protocols\r\n"\
                            "Upgrade: websocket\r\n"\
                            "Connection: Upgrade\r\n"\
                            "Sec-WebSocket-Accept: " + bytes.decode(token) + "\r\n"\
                            "WebSocket-Origin: " + str(headers["Origin"]) + "\r\n"\
                            "WebSocket-Location: " + str(headers["Location"]) + "\r\n\r\n"
                self.conn.send(str.encode(str(handshake)))
                self.handshaken = True
                # Send the initial system information to the browser.
                import get_info
                first_info = """{"%s":%s}""" % (self.ie_key, get_info.get_info())
                sendMessage(first_info)
                i = 1
                self.buffer_utf8 = ""
                g_code_length = 0
            else:
                T = sendMessage(info)
                if T == 'Error':
                    print "Receive channel closed"
                    # deleteconnection(str(self.index))
                    break
        else:
            mm = ''
            try:
                while True:
                    self.conn.settimeout(2)
                    mm += self.conn.recv(1024)
            except Exception:
                pass
            if len(mm) <= 0:
                continue
            if g_code_length == 0:
                get_datalength(mm)  # length of the received frame
            self.length_buffer = self.length_buffer + len(mm)
            self.buffer = self.buffer + mm
            if self.length_buffer - g_header_length < g_code_length:
                continue
            else:
                self.buffer_utf8 = parse_data(self.buffer, self.ie_key)  # utf8
                msg_unicode = str(self.buffer_utf8).decode('utf-8', 'ignore')  # unicode
                if msg_unicode == 'quit':
                    nowTime = time.strftime('%H:%M:%S', time.localtime(time.time()))
                    self.conn.close()
                    break  # exit the thread
                else:
                    nowTime = time.strftime(u'%H:%M:%S', time.localtime(time.time()))
                    # Reset the buffers and length counters.
                    self.buffer_utf8 = ""
                    self.buffer = ""
                    g_code_length = 0
                    self.length_buffer = 0
                    self.buffer = ""
def run(self, _):
    rospy.loginfo('[%s]: Running', self.name)
    t0 = rospy.Time.now()

    # New pc available
    if not self.new_pc:
        rospy.loginfo('[%s]: No new pointcloud', self.name)
        return
    self.new_pc = False

    # Retrieve image
    try:
        pc = self.pc
        header = self.pc.header
        if not self.init:
            rospy.loginfo('[%s]: Start pc segmentation', self.name)
    except Exception:
        rospy.logwarn('[%s]: There is no input pc to run the segmentation', self.name)
        return

    # Set model
    if not self.init:
        self.set_model()
        self.init = True

    pc_np = self.pc2array(pc)
    if pc_np.shape[0] < 2000:  # return if points < thr //PARAM
        rospy.loginfo('[%s]: Not enough input points', self.name)
        return

    pc_np[:, 2] *= -1  # flip Z axis //PARAM
    # pc_np[:, 1] *= -1  # flip Y axis //PARAM

    xyz_min = np.amin(pc_np, axis=0)[0:3]  # get pointcloud mins
    pc_np[:, 0:3] -= xyz_min               # move pointcloud to origin
    xyz_max = np.amax(pc_np, axis=0)[0:3]  # get pointcloud maxs

    t1 = rospy.Time.now()

    # Divide the data into blocks of size "block" with a stride "stride";
    # in each block select "points_sub" random points (subsample PC for evaluation).
    data_sub, label_sub = indoor3d_util.room2blocks_plus_normalized_parsed(
        pc_np, xyz_max, self.points_sub, block_size=self.block_sub,
        stride=self.stride_sub, random_sample=False, sample_num=None,
        sample_aug=1)
    if data_sub.size == 0:  # return if room2blocks_plus_normalized_parsed has deleted all blocks
        rospy.loginfo('[%s]: No data after block-stride', self.name)
        return

    t2 = rospy.Time.now()

    with tf.Graph().as_default():
        pred_sub = self.evaluate(data_sub, label_sub, xyz_max)  # evaluate PC
    if pred_sub.size == 0:  # return if no prediction
        rospy.loginfo('[%s]: No prediction', self.name)
        return

    # Delete duplicates from room2blocks (if points in block < points_sub, it duplicates them).
    pred_sub = np.unique(pred_sub, axis=0)
    pred_sub[:, 0:3] += xyz_min  # recover original position
    pred_sub[:, 2] *= -1         # unflip Z axis

    t3 = rospy.Time.now()

    pc_np_base = pred_sub.copy()              # for print only
    pc_np_base = np.delete(pc_np_base, 6, 1)  # delete class prediction

    # Downsample the prediction to 128 points if it is not already at 128.
    if self.points_sub >= 128:  # //PARAM
        down = 128 / self.points_sub  # //PARAM
        n_idx_pred_sub_down = int(pred_sub.shape[0] * down)
        idx_pred_sub_down = np.random.choice(pred_sub.shape[0], n_idx_pred_sub_down, replace=False)
        pred_sub = pred_sub[idx_pred_sub_down, 0:7]

    pred_sub_pipe = pred_sub[pred_sub[:, 6] == [self.labels["pipe"]]]    # points predicted as pipe
    pred_sub_valve = pred_sub[pred_sub[:, 6] == [self.labels["valve"]]]  # points predicted as valve

    # get valve instances
    instances_ref_valve_list, pred_sub_pipe_ref, stolen_list = get_instances.get_instances(
        pred_sub_valve, self.dim_v, self.rad_v, self.min_p_v,
        ref=True, ref_data=pred_sub_pipe, ref_rad=0.1)  # //PARAM
    # instances_ref_valve_list, pred_sub_pipe_ref, stolen_list = get_instances.get_instances_o3d(
    #     pred_sub_valve, self.dim_v, self.rad_v, self.min_p_v,
    #     ref=True, ref_data=pred_sub_pipe, ref_rad=0.1)

    # Projecting valve instances was removed because it produced errors on
    # valve matching due to the points gathered at the floor:
    # instances_ref_valve_list = project_inst.project_inst(instances_ref_valve_list, pc_proj)  # pc_np_base

    t4 = rospy.Time.now()

    # get valve information
    info_valves_list = list()
    for i, inst in enumerate(instances_ref_valve_list):  # for each valve instance
        # transform instance to o3d pointcloud
        xyz_central = np.mean(inst, axis=0)[0:3]  # get instance center
        inst[:, 0:3] -= xyz_central               # move center to origin
        inst_o3d = o3d.geometry.PointCloud()
        inst_o3d.points = o3d.utility.Vector3dVector(inst[:, 0:3])
        inst_o3d.colors = o3d.utility.Vector3dVector(inst[:, 3:6])
        inst_o3d.estimate_normals(
            search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.01, max_nn=15))  # compute normals
        inst_o3d.orient_normals_to_align_with_direction(orientation_reference=([0, 0, 1]))  # align normals
        inst[:, 0:3] += xyz_central  # recover original position

        # Get valve instance info: list([fitness1, rotation1], [fitness2, rotation2], ...),
        # with len = len(self.targets_list).
        info_valve = get_info.get_info(inst_o3d, self.targets_list, method="matching")

        # max() compares the first element of each entry, which is the fitness.
        max_info = max(info_valve)
        max_idx = info_valve.index(max_info)  # idx of best valve match

        rad = math.radians(max_info[1])
        vector = np.array([math.cos(rad), math.sin(rad), 0])  # valve unit vector at zero
        vector = vector * 0.18  # resize vector to valve size //PARAM

        info_valves_list.append([xyz_central, max_info, vector, max_idx, inst[:, 0:3]])  # append valve instance info

        # print best valve matching
        # trans = np.eye(4)
        # trans[:3, :3] = inst_o3d.get_rotation_matrix_from_xyz((0, 0, -rad))
        # get_info.draw_registration_result(inst_o3d, self.targets_list[max_idx], trans)

    # Based on valve fitness, discard poor matches and return their stolen
    # points to the pipe prediction.
    discard_valves_list = [i for i, x in enumerate(info_valves_list) if x[1][0] < 0.4]  # if max fitness < thr //PARAM
    for i in discard_valves_list:
        print("Valve discarded")
        discarded_points = np.vstack(instances_ref_valve_list[i])        # points to discard
        stolen_idx = list(np.vstack(stolen_list[i])[:, 0].astype(int))   # get stolen idx
        stolen_cls = np.vstack(stolen_list[i])[:, 1].astype(int)         # get stolen class
        stolen_cls = stolen_cls.reshape(stolen_cls.shape[0], 1)          # reshape stolen class
        if len(stolen_idx) > 0:  # if there were stolen points
            stolen_points = discarded_points[stolen_idx, :-2]  # recover stolen points
            stolen_points = np.concatenate((stolen_points, stolen_cls), axis=1)          # concatenate stolen points and class
            pred_sub_pipe_ref = np.concatenate((pred_sub_pipe_ref, stolen_points), axis=0)  # add them back to the pipe prediction
    for index in sorted(discard_valves_list, reverse=True):  # delete discarded valve info
        del info_valves_list[index]
        del instances_ref_valve_list[index]  # for print only

    t5 = rospy.Time.now()

    # get pipe instances
    instances_ref_pipe_list, _, _ = get_instances.get_instances(pred_sub_pipe_ref, self.dim_p, self.rad_p, self.min_p_p)
    # instances_ref_pipe_list, _, _ = get_instances.get_instances_o3d(pred_sub_pipe_ref, self.dim_p, self.rad_p, self.min_p_p)

    t6 = rospy.Time.now()

    info_pipes_list = list()
    info_connexions_list = list()
    k_pipe = 0
    for i, inst in enumerate(instances_ref_pipe_list):  # for each pipe instance
        # transform instance to o3d pointcloud
        inst_o3d = o3d.geometry.PointCloud()
        inst_o3d.points = o3d.utility.Vector3dVector(inst[:, 0:3])
        inst_o3d.colors = o3d.utility.Vector3dVector(inst[:, 3:6] / 255)

        # Get pipe instance info:
        # list(list(list(chain1, start1, end1, elbow_list1, vector_chain_list1), ...), list(connexion_points)).
        info_pipe = get_info.get_info(inst_o3d, models=0, method="skeleton")

        for j, pipe_info in enumerate(info_pipe[0]):  # stack pipes info
            inst_list = list()
            inst_list.append(i)
            pipe_info.append(inst_list)
            info_pipes_list.append(pipe_info)
        for j, connexion_info in enumerate(info_pipe[1]):  # stack connexions info
            connexion_info[1] = [x + k_pipe for x in connexion_info[1]]
            info_connexions_list.append(connexion_info)
        k_pipe += len(info_pipe[0])  # update actual pipe idx

    t7 = rospy.Time.now()

    info_pipes_list_copy = copy.deepcopy(info_pipes_list)
    info_connexions_list_copy = copy.deepcopy(info_connexions_list)
    info_pipes_list2, info_connexions_list2 = get_info.unify_chains(info_pipes_list_copy, info_connexions_list_copy)

    t8 = rospy.Time.now()

    info_valves_list_copy = copy.deepcopy(info_valves_list)
    info_valves_list2 = get_info.refine_valves(info_valves_list_copy, info_pipes_list2)

    t9 = rospy.Time.now()

    info1 = [info_pipes_list, info_connexions_list, info_valves_list, instances_ref_pipe_list]     # TODO publish info
    info2 = [info_pipes_list2, info_connexions_list2, info_valves_list, instances_ref_pipe_list]   # TODO publish info
    info3 = [info_pipes_list2, info_connexions_list2, info_valves_list2, instances_ref_pipe_list]  # TODO publish info

    if len(info3) > 0:
        info_array = get_info.info_to_array(info3)
        pc_info = self.array2pc_info(header, info_array)
        self.pub_pc_info.publish(pc_info)

    out = True
    if out == True:
        name = str(time.time())
        name = name.replace('.', '')
        path_out1 = os.path.join("/home/miguel/Desktop/PIPES2/out_ros", name + "_1.ply")
        get_info.info_to_ply(info1, path_out1)
        path_out2 = os.path.join("/home/miguel/Desktop/PIPES2/out_ros", name + "_2.ply")
        get_info.info_to_ply(info2, path_out2)
        path_out3 = os.path.join("/home/miguel/Desktop/PIPES2/out_ros", name + "_3.ply")
        get_info.info_to_ply(info3, path_out3)

    # print info
    print(" ")
    print("INFO VALVES:")
    for valve in info_valves_list:
        valve.pop(-1)  # drop the raw points before printing
        print(valve)
    print(" ")
    print("INFO VALVES REF:")
    for valve in info_valves_list2:
        valve.pop(-2)
        print(valve)
    print(" ")
    print("INFO PIPES:")
    for pipe1 in info_pipes_list:
        pipe1.pop(0)  # drop the chain before printing
        print(pipe1)
    print(" ")
    print("INFO PIPES REF:")
    for pipe2 in info_pipes_list2:
        pipe2.pop(0)
        print(pipe2)
    print(" ")
    print("INFO CONNEXIONS:")
    for connexion in info_connexions_list:
        print(connexion)
    print(" ")
    print("INFO CONNEXIONS REF:")
    for connexion in info_connexions_list2:
        print(connexion)
    print(" ")

    # publishers
    i = len(instances_ref_valve_list)
    if len(instances_ref_valve_list) > 0:
        instances_ref_proj_valve = np.vstack(instances_ref_valve_list)
    if len(instances_ref_pipe_list) > 0:
        instances_ref_pipe = np.vstack(instances_ref_pipe_list)
        instances_ref_pipe[:, 7] = instances_ref_pipe[:, 7] + i  # offset pipe instance ids past the valves
    if len(instances_ref_valve_list) > 0 and len(instances_ref_pipe_list) > 0:
        instances_ref = np.concatenate((instances_ref_proj_valve, instances_ref_pipe), axis=0)
    elif len(instances_ref_valve_list) == 0 and len(instances_ref_pipe_list) > 0:
        instances_ref = instances_ref_pipe
    elif len(instances_ref_valve_list) > 0 and len(instances_ref_pipe_list) == 0:
        instances_ref = instances_ref_proj_valve
    else:
        instances_ref = None

    if instances_ref is None:  # if instances were not found
        rospy.loginfo('[%s]: No instances found', self.name)
        return

    for i in range(pred_sub.shape[0]):
        color = self.label2color[pred_sub[i, 6]]
        pred_sub[i, 3] = color[0]
        pred_sub[i, 4] = color[1]
        pred_sub[i, 5] = color[2]
    for i in range(instances_ref.shape[0]):
        color = self.col_inst[instances_ref[i, 7]]
        instances_ref[i, 3] = color[0]
        instances_ref[i, 4] = color[1]
        instances_ref[i, 5] = color[2]

    pc_base = self.array2pc(header, pc_np_base)
    pc_seg = self.array2pc(header, pred_sub)
    pc_inst = self.array2pc(header, instances_ref)
    self.pub_pc_base.publish(pc_base)
    self.pub_pc_seg.publish(pc_seg)
    self.pub_pc_inst.publish(pc_inst)

    t10 = rospy.Time.now()

    time_read = t1 - t0
    time_blocks = t2 - t1
    time_inference = t3 - t2
    time_instances_valve = t4 - t3
    time_instances_pipe = t6 - t5
    time_instances = time_instances_valve + time_instances_pipe
    time_info_valve = t5 - t4
    time_info_pipe = t7 - t6
    time_info = time_info_valve + time_info_pipe
    time_ref_valve = t9 - t8
    time_ref_pipe = t8 - t7
    time_ref = time_ref_valve + time_ref_pipe
    time_publish = t10 - t9
    time_total = t10 - t0

    # print time info
    rospy.loginfo('[%s]: INFO TIMES:', self.name)
    print("")
    rospy.loginfo('[%s]: Pc processing took %.2f seconds. Split into:', self.name, time_total.secs + time_total.nsecs * 1e-9)
    rospy.loginfo('[%s]: Reading -------- %.2f seconds (%i%%)', self.name, time_read.secs + time_read.nsecs * 1e-9, (time_read / time_total) * 100)
    rospy.loginfo('[%s]: Blocks --------- %.2f seconds (%i%%)', self.name, time_blocks.secs + time_blocks.nsecs * 1e-9, (time_blocks / time_total) * 100)
    rospy.loginfo('[%s]: Inference ------ %.2f seconds (%i%%)', self.name, time_inference.secs + time_inference.nsecs * 1e-9, (time_inference / time_total) * 100)
    rospy.loginfo('[%s]: Instances ------ %.2f seconds (%i%%)', self.name, time_instances.secs + time_instances.nsecs * 1e-9, (time_instances / time_total) * 100)
    rospy.loginfo('[%s]:     - Valve - %.2f seconds (%i%%)', self.name, time_instances_valve.secs + time_instances_valve.nsecs * 1e-9, (time_instances_valve / time_total) * 100)
    rospy.loginfo('[%s]:     - Pipe -- %.2f seconds (%i%%)', self.name, time_instances_pipe.secs + time_instances_pipe.nsecs * 1e-9, (time_instances_pipe / time_total) * 100)
    rospy.loginfo('[%s]: Info ----------- %.2f seconds (%i%%)', self.name, time_info.secs + time_info.nsecs * 1e-9, (time_info / time_total) * 100)
    rospy.loginfo('[%s]:     - Valve - %.2f seconds (%i%%)', self.name, time_info_valve.secs + time_info_valve.nsecs * 1e-9, (time_info_valve / time_total) * 100)
    rospy.loginfo('[%s]:     - Pipe -- %.2f seconds (%i%%)', self.name, time_info_pipe.secs + time_info_pipe.nsecs * 1e-9, (time_info_pipe / time_total) * 100)
    rospy.loginfo('[%s]: Refine --------- %.2f seconds (%i%%)', self.name, time_ref.secs + time_ref.nsecs * 1e-9, (time_ref / time_total) * 100)
    rospy.loginfo('[%s]:     - Valve - %.2f seconds (%i%%)', self.name, time_ref_valve.secs + time_ref_valve.nsecs * 1e-9, (time_ref_valve / time_total) * 100)
    rospy.loginfo('[%s]:     - Pipe -- %.2f seconds (%i%%)', self.name, time_ref_pipe.secs + time_ref_pipe.nsecs * 1e-9, (time_ref_pipe / time_total) * 100)
    rospy.loginfo('[%s]: Publish -------- %.2f seconds (%i%%)', self.name, time_publish.secs + time_publish.nsecs * 1e-9, (time_publish / time_total) * 100)
    print(" ")
    print(" ")
    print("--------------------------------------------------------------------------------------------------")
    print("--------------------------------------------------------------------------------------------------")
    print(" ")
    print(" ")
         out_image_path=out_image_path,
         segmentation_or_image="segmentation_02",
         is_label=True)

# combining the two resampled images
resampled_segmentation_01_path = os.path.join(case_path, "resampled_segmentation_01.nii")
reader = sitk.ImageFileReader()
reader.SetFileName(resampled_segmentation_01_path)
image1 = reader.Execute()

resampled_segmentation_02_path = os.path.join(case_path, "resampled_segmentation_02.nii")
reader = sitk.ImageFileReader()
reader.SetFileName(resampled_segmentation_02_path)
image2 = reader.Execute()

# writing the combined image
combine_image = 1 * image1 + 2 * image2
combine_image_path = os.path.join(case_path, "combine_image.nii")
writer = sitk.ImageFileWriter()
writer.SetFileName(combine_image_path)
writer.Execute(combine_image)

seg_path_for_check = r"C:\Users\michaely\Documents\hiwi\DeepMedic Stuff\Convert to nifti 20052020\3DSlicer_Niere\Phlebolithen\P046\combine_image.nii"
get_info(seg_path_for_check)
get_info(
    r"C:\Users\michaely\Documents\hiwi\DeepMedic Stuff\Convert to nifti 20052020\DICOM_Niere\DICOM\P106\resampled_image.nii"
)
def get_patients_image_info(path, keyword1='new', keyword2='.nii'):
    '''
    Extracts the information about a particular type of file of all the
    patients.

    Parameters
    ----------
    path : String
        Path to the folder containing all the case folders. Each case folder
        contains the patients' image files.
    keyword1 : String, optional
        Filter to look for file(s) whose name contains the given string.
        The default is 'new'.
    keyword2 : String, optional
        Filter to look for file(s) whose name contains the given string.
        The default is '.nii'.

    Returns
    -------
    image_info_df_merged : DataFrame
        DataFrame containing information about a particular type of file of
        all the patients.
    '''
    cases = os.listdir(path)
    image_info_df_list = []
    for case in cases:
        case_path = os.path.join(path, case)
        if os.path.isdir(case_path):
            patients = os.listdir(case_path)
            for patient in patients:
                patient_path = os.path.join(case_path, patient)
                patient_dir_content = os.listdir(patient_path)
                for content in patient_dir_content:
                    if keyword1 in content:
                        filtered_0 = content
                        if keyword2 in filtered_0:
                            filtered_1 = filtered_0
                            print("Patient", patient, "of", case, "is being processed.")
                            image_path = os.path.join(patient_path, filtered_1)
                            image_info = get_info(image_path)
                            reader = sitk.ImageFileReader()
                            reader.SetFileName(image_path)
                            image = reader.Execute()
                            data = sitk.GetArrayFromImage(image)
                            Stone_label = data[:, :, :, 0]
                            Phlebolite_label = data[:, :, :, 1]

                            # Getting info about the Stones
                            final_ROIs_1 = Stone_label
                            label_final_ROIs_1 = label(final_ROIs_1)
                            props_1 = regionprops(label_final_ROIs_1)
                            listed_1 = [pd.DataFrame()]
                            # sitk_image = sitk.ReadImage(image_path)
                            # data = sitk.GetArrayFromImage(sitk_image)
                            for prop in props_1:
                                df = pd.DataFrame(
                                    {
                                        "Case": case,
                                        "Patient": patient,
                                        "File_name": filtered_1,
                                        "Stone or Phlebolite": "Stone",
                                        "Origin_X": image_info['Origin'][0],
                                        "Origin_Y": image_info['Origin'][1],
                                        "Origin_Z": image_info['Origin'][2],
                                        "Size_X": image_info['Size'][0],
                                        "Size_Y": image_info['Size'][1],
                                        "Size_Z": image_info['Size'][2],
                                        "Spacing_X": image_info['Spacing'][0],
                                        "Spacing_Y": image_info['Spacing'][1],
                                        "Spacing_Z": image_info['Spacing'][2],
                                        "Label": prop.label,
                                        "Centroid": [prop.centroid],
                                        "Voxel Area": prop.area,
                                        "Voxel Volume": image_info['Spacing'][0]
                                        * image_info['Spacing'][1]
                                        * image_info['Spacing'][2],
                                        "Lesion Volume": image_info['Spacing'][0]
                                        * image_info['Spacing'][1]
                                        * image_info['Spacing'][2] * prop.area,
                                    },
                                    index=[0])
                                listed_1.append(df)
                            listed_merged_1 = pd.concat(listed_1)
                            listed_merged_1.index = range(len(listed_merged_1))

                            # Getting info about the Phlebolites
                            final_ROIs_2 = Phlebolite_label
                            label_final_ROIs_2 = label(final_ROIs_2)
                            props_2 = regionprops(label_final_ROIs_2)
                            listed_2 = [pd.DataFrame()]
                            for prop in props_2:
                                df = pd.DataFrame(
                                    {
                                        "Case": case,
                                        "Patient": patient,
                                        "File_name": filtered_1,
                                        "Stone or Phlebolite": "Phlebolite",
                                        "Origin_X": image_info['Origin'][0],
                                        "Origin_Y": image_info['Origin'][1],
                                        "Origin_Z": image_info['Origin'][2],
                                        "Size_X": image_info['Size'][0],
                                        "Size_Y": image_info['Size'][1],
                                        "Size_Z": image_info['Size'][2],
                                        "Spacing_X": image_info['Spacing'][0],
                                        "Spacing_Y": image_info['Spacing'][1],
                                        "Spacing_Z": image_info['Spacing'][2],
                                        "Label": prop.label,
                                        "Centroid": [prop.centroid],
                                        "Voxel Area": prop.area,
                                        "Voxel Volume": image_info['Spacing'][0]
                                        * image_info['Spacing'][1]
                                        * image_info['Spacing'][2],
                                        "Lesion Volume": image_info['Spacing'][0]
                                        * image_info['Spacing'][1]
                                        * image_info['Spacing'][2] * prop.area,
                                    },
                                    index=[0])
                                listed_2.append(df)
                            listed_merged_2 = pd.concat(listed_2)
                            listed_merged_2.index = range(len(listed_merged_2))

                            result_list_for_patient = pd.concat([listed_merged_1, listed_merged_2])
                            image_info_df_list.append(result_list_for_patient)
    image_info_df_merged = pd.concat(image_info_df_list)
    image_info_df_merged.index = range(len(image_info_df_merged))
    return image_info_df_merged
# Main Program
for page in range(pages):
    # Get the IDs on this page
    number = page * num_each_page
    ID_list = get_ID(number, tag_name, pattern_text, headers, id_url)
    time.sleep(3)
    print('Pages Cruising Procedure: '
          '{:.2%}'.format(page / pages))
    for single_id in ID_list:
        my_index += 1
        # Get information of id
        try:
            ID_info = get_info(single_id, headers, info_url)
            # Store information
            insert_data = '''INSERT INTO {tb_name} \
(my_index, id, title, chi_title, average_rate, raters) \
VALUES ({my_index}, {dbid}, "{title}", "{chi_title}", {average_rate}, {raters})\
;'''.format(tb_name=table_name,
            my_index=my_index,
            dbid=ID_info['dbid'],
            title=ID_info['title'].replace('"', "'"),
            chi_title=ID_info['chi_title'].replace('"', "'"),
            average_rate=ID_info['average'],
            raters=ID_info['raters'])
            # print(insert_data)
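# The INSERT above splices values into the SQL by string formatting, which is
# why the quotes need the replace('"', "'") escaping and which leaves the
# script open to injection. A parameterized sketch, assuming a DB-API
# connection such as sqlite3 (the database file is a hypothetical placeholder;
# only the table name, an identifier, still needs formatting):
import sqlite3

conn = sqlite3.connect("movies.db")  # hypothetical database file
insert_sql = ("INSERT INTO {tb_name} "
              "(my_index, id, title, chi_title, average_rate, raters) "
              "VALUES (?, ?, ?, ?, ?, ?);").format(tb_name=table_name)
conn.execute(insert_sql, (my_index, ID_info['dbid'], ID_info['title'],
                          ID_info['chi_title'], ID_info['average'],
                          ID_info['raters']))
conn.commit()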
def run(self):  # overrides Thread.run
    # Frame-length state shared with get_datalength(); declared global so the
    # assignments below update the module-level values.
    global g_code_length, g_header_length
    headers = {}
    self.handshaken = False
    while True:
        if self.handshaken == False:
            # Perform the WebSocket opening handshake first.
            if self.remote[0] == "127.0.0.1":
                while True:
                    self.conn.settimeout(1200)
                    info_tmp = bytes.decode(self.conn.recv(1024))
                    self.buffer += info_tmp
                    if not info_tmp:
                        break
            else:
                self.buffer = bytes.decode(self.conn.recv(104857600))
            info = self.buffer
            if self.buffer.find('\r\n\r\n') != -1:
                header, data = self.buffer.split('\r\n\r\n', 1)
                for line in header.split("\r\n")[1:]:
                    key, value = line.split(": ", 1)
                    headers[key] = value
                headers["Location"] = ("ws://%s%s" % (headers["Host"], self.path))
                key = headers['Sec-WebSocket-Key']
                token = b64encode(
                    hashlib.sha1(str.encode(str(key + self.GUID))).digest())
                handshake = "HTTP/1.1 101 Switching Protocols\r\n"\
                            "Upgrade: websocket\r\n"\
                            "Connection: Upgrade\r\n"\
                            "Sec-WebSocket-Accept: " + bytes.decode(token) + "\r\n"\
                            "WebSocket-Origin: " + str(headers["Origin"]) + "\r\n"\
                            "WebSocket-Location: " + str(headers["Location"]) + "\r\n\r\n"
                self.conn.send(str.encode(str(handshake)))
                self.handshaken = True
                # Send the initial system information to the browser.
                import get_info
                first_info = """{"%s":%s}""" % (self.ie_key, get_info.get_info())
                sendMessage(first_info)
                i = 1
                self.buffer_utf8 = ""
                g_code_length = 0
            else:
                T = sendMessage(info)
                if T == 'Error':
                    print "Receive channel closed"
                    deleteconnection(str(self.index))
                    break
        else:
            mm = ''
            try:
                while True:
                    self.conn.settimeout(0.5)
                    mm += self.conn.recv(1024)
            except Exception:
                pass
            if len(mm) <= 0:
                continue
            if g_code_length == 0:
                get_datalength(mm)  # length of the received frame
            self.length_buffer = self.length_buffer + len(mm)
            self.buffer = self.buffer + mm
            if self.length_buffer - g_header_length < g_code_length:
                continue
            else:
                self.buffer_utf8 = parse_data(self.buffer, self.ie_key)  # utf8
                msg_unicode = str(self.buffer_utf8).decode('utf-8', 'ignore')  # unicode
                if msg_unicode == 'quit':
                    nowTime = time.strftime('%H:%M:%S', time.localtime(time.time()))
                    self.conn.close()
                    break  # exit the thread
                else:
                    nowTime = time.strftime(u'%H:%M:%S', time.localtime(time.time()))
                    # Reset the buffers and length counters.
                    self.buffer_utf8 = ""
                    self.buffer = ""
                    g_code_length = 0
                    self.length_buffer = 0
                    self.buffer = ""
inst[:, 0:3] -= xyz_central  # move instance to origin
inst_o3d = o3d.geometry.PointCloud()
inst_o3d.points = o3d.utility.Vector3dVector(inst[:, 0:3])
inst_o3d.colors = o3d.utility.Vector3dVector(inst[:, 3:6])
inst_o3d.estimate_normals(
    search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.01, max_nn=15))
inst_o3d.orient_normals_to_align_with_direction(orientation_reference=([0, 0, 1]))
inst[:, 0:3] += xyz_central  # move instance to original position

info_valve = get_info.get_info(inst_o3d, targets_list, method="matching")
max_info = max(info_valve)
max_idx = info_valve.index(max_info)

rad = math.radians(max_info[1])
vector = np.array([math.cos(rad), math.sin(rad), 0])  # get valve unit vector at Z 0
vector = vector * 0.18  # resize vector to valve size //PARAM

info_valves_list.append([xyz_central, max_info, vector, max_idx, inst[:, 0:3]])
s = requests.Session()
r = s.post('https://www.meijo-u.ac.jp/academics/syllabus/find/',
           data={
               'data[find][fiscal_year]': ['2017'],
               'data[find][faculty_id]': ['119']
           })

# Extract the detail-page hrefs from the root page and store them in a list.
soup = BeautifulSoup(r.content, 'html.parser')
# Each entry of subject_name holds one <a> tag row.
subject_name = soup.select('.normal td a')

# Store the hrefs from subject_name in the list `links`;
# links holds the link to each course's detail page.
links = []
for name in subject_name:
    href = name['href']
    data = href[24:]  # strip the common URL prefix
    links.append(data)

# Create instances with the scraping_data function.
department_list = []
unit_number_list = []
for link in links:
    department = get_info('https://www.meijo-u.ac.jp/academics/syllabus/find/' + link)
    print(department)

print('Done')
import get_info

name = '牛老师'
age = 20
age2 = 200

try:
    get_info.get_info(name, age)
    get_info.get_info2(name, age2)
except (ValueError, AssertionError) as e:
    print('Error:', e)
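# The caught exception types suggest get_info validates with an explicit
# ValueError while get_info2 relies on an assertion. A hypothetical sketch of
# such a get_info module (not the original implementation):
def get_info(name, age):
    if not 0 < age < 150:
        raise ValueError('age %r is out of range' % age)
    print('%s is %d years old' % (name, age))


def get_info2(name, age):
    assert 0 < age < 150, 'age %r is out of range' % age
    print('%s is %d years old' % (name, age))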
date_range_end = dt.date(year=2019, month=1, day=15)  # Latest session date to include (inclusive)

#####
# PIPELINE
#####

if __name__ == "__main__":
    # Load spreadsheet headers and save original column order (for when we write the CSV later)
    ed = pd.read_csv(eeg_details_path, header=1, nrows=0)
    esf = pd.read_csv(eeg_sub_files_path, header=1, nrows=0)
    ed_col_order = ed.columns
    esf_col_order = esf.columns

    # Load subject and session information into a data frame
    info = get_info(date_range_start, date_range_end, ltp_path=ltp_path)

    # Load extra manually-compiled information (date of birth, head circumference, cap size) into the data frame
    info = get_extra_info(info, extra_info_path)

    # Identify data file paths for each session, and add them to the data frame
    info = get_filepaths(info, ltp_path=ltp_path, protocols_path=protocols_path)

    # Fill out the two spreadsheets with the collected session information
    ed, esf = fill_info(ed, esf, info)

    # Write data to spreadsheets
    write_info(ed, eeg_details_path, ed_col_order, esf, eeg_sub_files_path, esf_col_order)
def info():
    return str(get_info(get_all_data(dataframe)))