def setup_db():
    """Build the MySQL engine from the 'mysql' config section and stash it on app.config.

    Reads user/password/host/port/db_name from Config and creates a pooled
    SQLAlchemy engine under app.config['sqlalchemy_engine'].
    """
    cfg = Config().get_content('mysql')
    # Assemble the PyMySQL connection URI from the configured credentials.
    uri = "mysql+pymysql://{}:{}@{}:{}/{}?charset=utf8".format(
        cfg["user"],
        cfg["password"],
        cfg["host"],
        cfg["port"],
        cfg["db_name"],
    )
    # NOTE(review): create_engine's `encoding` kwarg is deprecated in SQLAlchemy 1.4
    # and removed in 2.0 — confirm the pinned SQLAlchemy version before upgrading.
    app.config['sqlalchemy_engine'] = create_engine(
        uri,
        pool_size=20,
        pool_recycle=3600,
        max_overflow=10,
        encoding='utf-8',
    )
def post(self, *args, **kwargs):
    """Handle an SPSS (.sav) upload.

    Validates user_id/project_name, stores each uploaded file under
    <upload_path>/<user_id>/<YYYY-MM-DD>/, backs up a pre-existing file of the
    same name to *.bak, then hands the file off for processing via self.sleep.
    Writes a result(4000) JSON on validation failure, result(2000) on success.
    """
    file_metas = self.request.files["file"]
    user_id = self.get_arguments("user_id")[0]
    project_name = self.get_arguments("project_name")[0]
    if not (user_id and project_name):
        self.write(json.dumps(result(4000, value=None), ensure_ascii=False))
        return
    for meta in file_metas:
        # Strip any client-supplied directory components: the original trusted
        # meta['filename'] verbatim, allowing path traversal (e.g. "../../x.sav").
        file_name = os.path.basename(meta['filename'])
        # Only .sav (SPSS) uploads are accepted.
        if file_name.split(".")[-1] != "sav":
            self.write(json.dumps(result(4000, value=None), ensure_ascii=False))
            return
        # Build <upload_path>/<user_id>/<YYYY-MM-DD>/ for this upload.
        file_path = Config().get_content('filepath')['upload_path']
        user_file_path = os.path.join(file_path, str(user_id))
        time_now = datetime.datetime.now().strftime("%Y-%m-%d")
        user_subfilepath = os.path.join(user_file_path, time_now)
        # exist_ok avoids the exists()/makedirs() race and creates parents,
        # replacing the original's two separate exists+makedirs checks.
        os.makedirs(user_subfilepath, exist_ok=True)
        target = os.path.join(user_subfilepath, file_name)
        if os.path.exists(target):
            backup = target + ".bak"
            # os.renames raises if the destination exists; drop a stale backup first.
            if os.path.exists(backup):
                os.remove(backup)
            os.renames(target, backup)
        # Persist the uploaded bytes.
        with open(target, 'wb') as up:
            up.write(meta['body'])
        # Kick off downstream processing on the executor (debug timing prints removed).
        res = yield self.sleep(user_subfilepath, file_name, user_id, project_name)
    self.write(json.dumps(result(2000, value={}), ensure_ascii=False))
    self.finish()
        # NOTE(review): this chunk begins mid-method — the enclosing `def` (a POST
        # handler, judging by the yield/self.write pattern) starts outside this view.
        dataset_id = self.get_arguments("dataset_id")[0]
        # Delegate generation to the executor-backed sleep() below; res is the
        # raw response text (presumably the generated file path — confirm).
        res = yield self.sleep(user_id, project_name, dataset_id)
        self.write(json.dumps(result(2000, value={"filepath": res}), ensure_ascii=False))
        self.finish()

    @run_on_executor
    def sleep(self, user_id, project_name, dataset_id):
        # Forward the request to the local generate_spssfile service and return
        # its response body verbatim.
        ret = requests.post('http://127.0.0.1:8001/generate_spssfile',
                            data={"user_id": user_id,
                                  "project_name": project_name,
                                  "dataset_id": dataset_id})
        return ret.text


class Generate_SpssFile(RequestHandler):
    # Thread pool shared by all instances of this handler (2 workers).
    executor = ThreadPoolExecutor(2)

    @tornado.gen.coroutine
    def post(self, *args, **kwargs):
        # Generate the .sav file synchronously (despite the coroutine decorator,
        # spss_main runs inline) and reply with its path.
        user_id = self.get_arguments("user_id")[0]
        project_name = self.get_arguments("project_name")[0]
        dataset_id = self.get_arguments("dataset_id")[0]
        filepathname = spss_main(user_id, project_name, dataset_id).genreate_spss()
        self.write(json.dumps(result(2000, value={"filepath": filepathname}), ensure_ascii=False))
        self.finish()


if __name__ == '__main__':
    # Smoke check: print the configured upload directory.
    file_path = Config().get_content('filepath')['upload_path']
    print(file_path)
def __init__(self, libname="user_information"):
    """Bind this model to *libname* and load the project table-name mapping.

    self.res holds the connection handle produced by base_model; self.table
    holds the "user_proj" config section (user_table, proj_table,
    dataset_table, datainfor_table).
    """
    self.libname = libname
    # Table-name mapping comes from shared configuration.
    self.table = Config().get_content("user_proj")
    # Open the backing connection via the shared base_model helper.
    self.res = base_model(libname).connect()
def __init__(self, conf_name):
    """Load the *conf_name* config section; the connection starts unopened."""
    self.conn = None  # populated later by whatever performs the actual connect
    self.conf = Config().get_content(conf_name)
#!/usr/bin/env python # -*- coding:utf-8 -*- from sqlalchemy import create_engine import pandas as pd from common.base import Config ret = Config().get_content('notdbWangPengMysql') # 指定具体库 # SQLALCHEMY_DATABASE_URI = "mysql+pymysql://{}:{}@{}:{}/{}?charset=utf8".format(ret["user"], # ret["password"], # ret["host"], # ret["port"], # ret["db_name"]) # 不指定具体库 SQLALCHEMY_DATABASE_URI = "mysql+pymysql://{}:{}@{}:{}/{}?charset=utf8".format(ret["user"], ret["password"], ret["host"], ret["port"], '') sqlalchemy_engine = create_engine(SQLALCHEMY_DATABASE_URI, pool_size=20, pool_recycle=3600, max_overflow=10, encoding='utf-8', ) """ ----------------example:
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from sqlalchemy import create_engine
import pandas as pd

from common.base import Config

# MySQL credentials for the default 'mysql' config section.
ret = Config().get_content('mysql')

# Positional pieces of the connection URI, in the order the template expects.
_uri_parts = (ret["user"], ret["password"], ret["host"], ret["port"], ret["db_name"])
SQLALCHEMY_DATABASE_URI = "mysql+pymysql://{}:{}@{}:{}/{}?charset=utf8".format(*_uri_parts)

# Module-wide pooled engine; up to 20 pooled + 10 overflow connections,
# each recycled after an hour to dodge MySQL's idle timeout.
# NOTE(review): `encoding` is deprecated in SQLAlchemy 1.4 / removed in 2.0.
sqlalchemy_engine = create_engine(
    SQLALCHEMY_DATABASE_URI,
    max_overflow=10,
    pool_recycle=3600,
    pool_size=20,
    encoding='utf-8',
)
def genreate_spss(self):
    """Export the prepared dataset to an SPSS .sav file and return its path.

    The file is written to <download_path>/<user_id>/<YYYY-MM-DD>/, falling
    back to a sibling "download" directory when download_path is not
    configured. Row values are converted per column type (VARCHAR passthrough,
    DATETIME/DATE via SavWriter.spssDateTime, everything else as-is).
    """
    self.adjust_data()
    mdt = my_datetime()
    nowtime = datetime.datetime.now().strftime("%Y%m%d")
    # Unique name: u<user_id>_<yyyymmdd>_<epoch digits> (dot stripped from time.time()).
    stamp = "%.6f" % float(time.time())
    stamp_digits = stamp.split(".")[0] + stamp.split(".")[1]
    filename = "u" + str(self.user_id) + "_" + str(nowtime) + "_" + str(stamp_digits)

    # Resolve the output root once, then build <root>/<user_id>/<date>/.
    # (The original duplicated the whole directory-creation sequence in both
    # the configured-path and fallback branches.)
    filepath = Config().get_content("filepath")["download_path"]
    if not filepath:
        filepath = os.path.join(os.path.dirname(os.path.dirname(__file__)), "download")
    user_file_path = os.path.join(filepath, str(self.user_id))
    time_now = datetime.datetime.now().strftime("%Y-%m-%d")
    user_subfilepath = os.path.join(user_file_path, time_now)
    # exist_ok avoids the exists()/makedirs() race and creates parents as needed.
    os.makedirs(user_subfilepath, exist_ok=True)

    savFileName = os.path.join(user_subfilepath, filename + ".sav")
    with SavWriter(savFileName=savFileName,
                   varNames=self.varNames,
                   varTypes=self.varTypes,
                   formats=self.formats,
                   varLabels=self.varLabels,
                   valueLabels=self.valueLabels,
                   ioUtf8=True,
                   columnWidths={}) as writer:
        for row_data in self.my_data:
            sub_li = []
            for i in range(len(self.my_columns_types)):
                sub_data = row_data[self.varNames[i]]
                col_type = self.my_columns_types[i]
                if col_type == "VARCHAR":
                    sub_li.append(sub_data)
                elif col_type == "DATETIME":
                    as_str = mdt.become_str(sub_data)
                    sub_li.append(
                        writer.spssDateTime(bytes(as_str, 'utf-8'),
                                            '%Y-%m-%d %H:%M:%S'))
                elif col_type == "DATE":
                    # NOTE(review): DATETIME passes bytes but DATE passes str —
                    # confirm spssDateTime accepts str here; left as-is.
                    sub_li.append(
                        writer.spssDateTime('%s' % sub_data, '%Y-%m-%d'))
                else:
                    sub_li.append(sub_data)
            self.data.append(sub_li)
        writer.writerows(self.data)
    return savFileName