def CompanyInfoThread(category):
    """Crawl and persist company info for every company tied to a job category.

    Looks up all distinct company links in ``db_104.job_link`` whose job-category
    code starts with *category*, crawls each company page via ``Crawler``, and
    writes the scraped content as ``<company-id>.json`` inside a local directory.

    Side effects only (DB read, HTTP crawl, file writes, progress prints);
    returns ``None``.

    :param category: job-category code prefix used in the ``LIKE`` filter.
    """
    # Establish the SQL connection.
    # NOTE(review): credentials are blank placeholders — fill in before running.
    conn_cfg = {'host': '', 'user': '', 'password': '', 'db': ''}
    conn = pymysql.connect(**conn_cfg)
    try:
        with conn.cursor() as cursor:
            # Parameterized query instead of %-interpolation: immune to SQL
            # injection and quoting mistakes. Appending '%' to the bound value
            # keeps the original prefix-match semantics (LIKE '<category>%').
            sql = ("SELECT DISTINCT `公司連結` FROM db_104.job_link "
                   "WHERE `職類編號` LIKE %s")
            cursor.execute(sql, (str(category) + "%",))
            # Flatten the single-column result set into a plain list of links.
            companylink = [row[0] for row in cursor.fetchall()]
    finally:
        # Always release the connection, even if the query raises.
        conn.close()

    dn = "[directory]"  # output directory (placeholder from the original)
    # exist_ok avoids the exists()/makedirs race in the original code.
    os.makedirs(dn, exist_ok=True)

    # Crawl each company link in turn and persist the result as JSON.
    for link in companylink:
        url = "https://" + link
        try:
            content = Crawler.company_info(url)
            # Company id = the path segment after "company/", query string stripped.
            fn = url.split("company/")[1].split("?")[0]
            # BUGFIX: the original wrote to `dn + fn + ".json"` — no path
            # separator — so files were created NEXT TO the directory, not in it.
            with open(os.path.join(dn, fn + ".json"), "w", encoding="utf-8") as f:
                json.dump(content, f)
            print(url)
            print("職務類型", category, "的公司:", fn, "complete")
        except Exception as exc:
            # Best-effort per link (the original used a bare `except: pass`);
            # keep going, but report the failure instead of hiding it.
            print("failed:", url, repr(exc))