Example #1
 def __init__(self, baseurl=None, config=None, username=None, password=None, profile=None):
     config = Config(config, profile=profile)
     if not baseurl:
         baseurl = config.getconfig("api").baseurl
     if not baseurl:
         raise Exception('Server requires baseurl')
     elif type(baseurl) is not str:
         # Handle unparsed URLs as baseurls
         baseurl = unparseurl(baseurl)
     if not urlparse(baseurl).hostname:
         raise Exception('Bad baseurl arg', baseurl)
     if baseurl.endswith('/api/'):
         baseurl = baseurl
     elif baseurl.endswith('/'):
         baseurl = baseurl + 'api/'
     else:
         baseurl = baseurl + '/api/'
     if not username:
         username = config.getconfig(baseurl).username
     if type(username) is not str:
         raise Exception('bad username', username)
     if not password:
         password = config.getconfig(baseurl).password
     if type(password) is not str:
         # Don't pass the password arg to try and keep it out of
         # error messages which anyone might see
         raise Exception('bad password')
     self.baseurl = baseurl
     self.username = username
     self.password = password
     self.credentials = (username, password)
     self.endpoints = {}
     self.__cookies = None
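As a quick illustration of the baseurl handling above, here is a minimal standalone sketch of the same normalization. The helper name is hypothetical and it assumes Python 3's urllib.parse, whereas the original may import urlparse from the Python 2 module of the same name:

from urllib.parse import urlparse

def normalize_baseurl(baseurl):
    # Reject values without a hostname, then force a trailing '/api/'.
    if not urlparse(baseurl).hostname:
        raise ValueError('Bad baseurl arg: %r' % (baseurl,))
    if baseurl.endswith('/api/'):
        return baseurl
    if baseurl.endswith('/'):
        return baseurl + 'api/'
    return baseurl + '/api/'

# normalize_baseurl('https://host')      -> 'https://host/api/'
# normalize_baseurl('https://host/')     -> 'https://host/api/'
# normalize_baseurl('https://host/api/') -> 'https://host/api/'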
Example #2
def all_bug_history():
    """
    Fetch the history of every bug listed by ID in the generated csv file
    and store the data in mongodb.
    Return: None
    """
    csvfile = file('bug_info.csv', 'rb')
    csv_reader = csv.DictReader(csvfile)
    collection_all_bug_history = database['all_bug_history']
    for i in range(3, 10003):
        for row in csv_reader:
            all_bug_history_url = getconfig("bugzilla", "all_bug_history_url")
            all_bug_history_url = all_bug_history_url + row['ID']
            # Call bug_history from bugzilla to generate the xml file of bug history info
            bug_history(all_bug_history_url)
            # Open the xml file
            xml_file = open('bug_history.xml', 'r')
            # Read the xml file contents
            xml_str = xml_file.read()
            # Convert the xml content to json
            convertedDict = xmltodict.parse(xml_str)
            jsonStr = json.dumps(convertedDict, indent=1)
            fileObject = open('bug_history.json', 'w')
            fileObject.write(jsonStr)
            fileObject.close()
            filename = 'bug_history.json'
            Timestamp = tm.strftime('%Y-%m-%d', tm.localtime(tm.time()))
            with open(filename, 'r') as f:
                content = json.load(f)
                content['ID'] = row['ID']
                content['Time'] = Timestamp
                collection_all_bug_history.insert_one(content)
    print "insert all_bug_history to mongodb successfully"
Example #3
def logger(level, log_info):
    """Write a runtime log record to the log file."""
    log_path = getconfig("log", "LOG_PATH")
    log_level = getconfig("log", "LOG_LEVEL")
    log_enable = getconfig("log", "LOG_ENABLE")
    log_fname = getconfig("log", "LOG_FNAME")
    if not os.path.exists(log_path):
        os.makedirs(log_path)
    log_file = os.path.join(log_path, log_fname)
    # Based on the input string ("DEBUG", "ERROR", ...) get the level number
    lvl = l_type_lst.index(level)

    # now, begin to write into log file
    log_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
    log_pid = os.getpid()
    log_script = sys._getframe().f_back.f_code.co_filename.split('/')[-1]
    log_method = sys._getframe().f_back.f_code.co_name
    log_line = sys._getframe().f_back.f_lineno
    with open(log_file, "a") as log:
        if lvl <= int(log_level) and bool(log_enable):
            log.write("%s %s %s %s:%s:%s %s\n" % (log_time, log_pid, level,
                                                  log_script, log_method,
                                                  log_line, log_info))
Example #4
 def __init__(self):
     self.local_dir = getconfig("document", "local_dir")
     self.svn_address = getconfig("document", "svn_address")
     self.username = getconfig("document", "username")
     self.password = getconfig("document", "password")
     self.project_summary_file = getconfig("document", "file_path")
     self.project_plan_file = os.path.join(
         self.local_dir, getconfig("document", "project_plan"))
Example #5
 def __init__(self,
              baseurl=None,
              config=None,
              username=None,
              password=None,
              profile=None):
     config = Config(config, profile=profile)
     if not baseurl:
         baseurl = config.getconfig("api").baseurl
     if not baseurl:
         raise Exception('Server requires baseurl')
     elif type(baseurl) is not str:
         # Handle unparsed URLs as baseurls
         baseurl = unparseurl(baseurl)
     if not urlparse(baseurl).hostname:
         raise Exception('Bad baseurl arg', baseurl)
     if baseurl.endswith('/api/'):
         baseurl = baseurl
     elif baseurl.endswith('/'):
         baseurl = baseurl + 'api/'
     else:
         baseurl = baseurl + '/api/'
     if not username:
         username = config.getconfig(baseurl).username
     if type(username) is not str:
         raise Exception('bad username', username)
     if not password:
         password = config.getconfig(baseurl).password
     if type(password) is not str:
         # Don't pass the password arg to try and keep it out of
         # error messages which anyone might see
         raise Exception('bad password')
     self.baseurl = baseurl
     self.username = username
     self.password = password
     self.credentials = (username, password)
     self.endpoints = {}
     self.__cookies = None
Example #6
def get_project_count():
    """
    Fetch the git info of every project in gerrit,
    plus the detailed info of every single gerrit change.
    All data is written to json files.
    Return: None
    """
    # json list holding all gerrit data for the projects
    json_all_gerrit_list = []
    # json list holding the per-change gerrit data
    json_one_gerrit_list = []
    # loop condition; becomes False when a page returns no data
    judgment = True
    # pagination offset
    count = 0
    # get the gerrit url
    gerrit_url = getconfig("gerrit", "gerrit_url")
    # gerrit_url = "http://10.200.43.166:8080"
    # gerrit info for all projects
    url_list = "%s/changes/?q=status:merged&n=25&O=81" % gerrit_url
    while judgment:
        if count != 0:
            url_list = "%s/changes/?q=status:merged&n=25&O=81&S=%s" % (
                gerrit_url, count)
        data_dict = get_data_dict(url_list)
        # if the fetched data is empty, leave the loop
        if data_dict:
            Timestamp = tm.strftime('%Y-%m-%d', tm.localtime(tm.time()))

            for i in data_dict:
                i['Time'] = Timestamp
            json_all_gerrit_list.extend(data_dict)
            for data in data_dict:
                # fetch the change's revision history
                url_info = "%s/changes/%s/detail?O=404" % (gerrit_url,
                                                           data['_number'])
                history_dict = get_data_dict(url_info)
                history_dict['Time'] = Timestamp
                # append the record to the data list
                json_one_gerrit_list.append(history_dict)
        else:
            judgment = False
        count = count + 25
    # write the aggregated gerrit info to json
    with open('all_gerrit.json', 'w') as f:
        json.dump(json_all_gerrit_list, f)
    # write the per-change gerrit info to json
    with open('one_gerrit_information.json', 'w') as f:
        json.dump(json_one_gerrit_list, f)
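get_data_dict() is not shown here; a plausible sketch using requests follows. One real-world detail worth keeping in mind: Gerrit's REST API prefixes every JSON response with the magic string )]}' as XSSI protection, so it must be stripped before decoding. The function body below is an assumption, not the project's actual helper:

import json
import requests

def get_data_dict(url):
    # Fetch a Gerrit REST endpoint and return the decoded JSON payload.
    resp = requests.get(url)
    resp.raise_for_status()
    body = resp.text
    if body.startswith(")]}'"):  # Gerrit's XSSI-protection prefix
        body = body[4:]
    return json.loads(body)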
Example #7
def bug_model_distribution(project_name, time):
    """
    Get the distribution of a project's bugs across its modules.
    project_name: istack-pike; istack; spsy
    time: year-month-day, e.g. 2018-08-03
    :return: list of {'name': ..., 'value': ...} dicts
    """
    # dict holding the project's bug count per module
    project_bug_model_dict = {}
    # list holding the project's bug distribution per module
    project_bug_model_list = []
    # count the bug distribution across istack's modules
    if project_name == 'istack-pike' or project_name == 'istack-liberty':
        istack_bug_model = getconfig("bug_model",
                                     "istack_bug_model").split(',')
        for i in range(len(istack_bug_model)):
            project_bug_model_dict[istack_bug_model[i]] = 0
        project = define_project_name(project_name, "bugzilla")
        istack_model_list = bug_query(project, time)
        if istack_model_list:
            for istack_data_model in istack_model_list:
                for i in range(len(istack_bug_model)):
                    if istack_data_model['Comp'] == istack_bug_model[i]:
                        if project_name == 'istack-pike' and istack_data_model[
                                'Vers'] == '3(Pik':
                            project_bug_model_dict[istack_bug_model[i]] += 1
                        if project_name == 'istack-liberty' and istack_data_model[
                                'Vers'] != '3(Pik':
                            project_bug_model_dict[istack_bug_model[i]] += 1
        if project_name == 'istack-liberty':
            del project_bug_model_dict['平台安全']
    # count the bug distribution across the other projects' modules
    else:
        project_model_list = bug_query(project_name, time)
        if project_model_list:
            for model in project_model_list:
                project_bug_model_dict[model['Comp']] = 0
        # module names found for the project
        project_bug_model = project_bug_model_dict.keys()
        for project_data in project_model_list:
            for i in range(len(project_bug_model)):
                if project_data[u'Comp'] == project_bug_model[i]:
                    project_bug_model_dict[project_bug_model[i]] += 1
        # drop the requirements-management bucket if it is present
        if u'需求管理' in project_bug_model_dict:
            del project_bug_model_dict[u'需求管理']
    # convert to the format expected by the frontend
    for key, value in project_bug_model_dict.items():
        project_bug_model_list.append({'name': key, 'value': value})
    return project_bug_model_list
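For the else branch, the same per-component counting can be written more compactly with collections.Counter. A sketch assuming the same 'Comp' field and bucket name; the helper name is hypothetical and this is not the author's code:

from collections import Counter

def count_by_component(model_list):
    # Count bugs per component and drop the requirements-management bucket.
    counts = Counter(bug['Comp'] for bug in model_list)
    counts.pop(u'需求管理', None)
    return [{'name': comp, 'value': n} for comp, n in counts.items()]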
Example #8
def all_project_bug():
    """
    Fetch the bug info of every project and store the data in mongodb.
    Return: None
    """
    collection_all_project_bug = database['all_project_bug']
    all_project_bug_url = getconfig("bugzilla", "all_project_bug_url")
    # Call bug_info from bugzilla_data to dump every project's bug info into a .csv file
    bug_info(all_project_bug_url)
    # Read the csv file
    csvfile = file('bug_info.csv', 'rb')
    csv_reader = csv.DictReader(csvfile)
    for i in range(3, 10003):
        for row in csv_reader:
            collection_all_project_bug.insert(row)
    print "insert all_project_bug to mongodb successfully"
    csvfile.close()
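One caveat with this pattern (it also applies to Example #2): csv.DictReader is a one-shot iterator, so the rows are consumed on the first pass of the outer range loop and the remaining iterations do nothing. A tiny Python 3 demonstration:

import csv
import io

reader = csv.DictReader(io.StringIO("ID\n1\n2\n"))
print(len(list(reader)))  # 2 -- the rows are consumed here
print(len(list(reader)))  # 0 -- a second pass yields nothing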
Example #9
    def connect_db(self, host='', user='', password='', db='', port=''):
        # if host == '':
        config_url = self.apppath + '/config/config.ini'
        conf = getconfig(config_url, 'mysql')

        self.host = conf['host']
        self.user = conf['user']
        self.password = conf['password']
        self.port = int(conf['port'])
        self.database = conf['database']

        self.db = pymysql.connect(host=self.host,
                                  user=self.user,
                                  password=self.password,
                                  db=self.database,
                                  port=self.port)
        # self.db = pymysql.connect(host="127.0.0.1", user="******", password="******", db="story", port=3306)
        # get the cursor handle
        self.link = self.db.cursor()
        return self.link
Example #10
def main():
    # Set default values for the CFG file and SQLite database locations.
    cfgfile = "/etc/dcr/settings.json"
    sqlitedb = "/etc/dcr/dcr.db"

    # Loop through all arguments.
    for arg in sys.argv:
        # Handle config file.
        if arg.startswith("cfg="):
            cfgfile = arg.split('=')[1]
        elif arg.startswith("sqlite="):
            sqlitedb = arg.split('=')[1]

    # Now connect to SQLite.
    conn = db.connect(sqlitedb)

    # Attempt to set up the tables if they don't exist yet, and gather information.
    setup.setuptables(conn)

    # Get config from JSON file.
    cfg = config.getconfig(cfgfile)

    # Connect Discord bot.
    discordbot.connect(cfg, conn)
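The hand-rolled sys.argv scan could equally use argparse from the standard library; a sketch with the same defaults. The --cfg/--sqlite flag names are an assumption here, whereas the original expects cfg=... and sqlite=... style arguments:

import argparse

parser = argparse.ArgumentParser(description="dcr Discord bot")
parser.add_argument("--cfg", default="/etc/dcr/settings.json",
                    help="path to the JSON settings file")
parser.add_argument("--sqlite", default="/etc/dcr/dcr.db",
                    help="path to the SQLite database")
args = parser.parse_args()
# args.cfg and args.sqlite play the role of cfgfile and sqlitedb above.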
Example #11
 def __init__(self, notifier, mediamanager, config=getconfig()):
     self.notifier = notifier
     self.mediamanager = mediamanager
     self.config = config
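Worth noting for this and the similar constructors below: `config=getconfig()` is evaluated once, when the def statement runs, so every instance shares the config object loaded at import time. If per-instance loading were wanted, a common alternative looks like this (a sketch, not the author's code):

 def __init__(self, notifier, mediamanager, config=None):
     self.notifier = notifier
     self.mediamanager = mediamanager
     # Load the config at call time unless the caller supplied one.
     self.config = config if config is not None else getconfig()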
Example #12
 def __init__(self, notifier, mediamanager, config=getconfig()):
     self.notifier = notifier
     self.mediamanager = mediamanager
     self.dictionaries = dictionary.PinyinDictionary.loadall()
     self.config = config
Example #13
        yield date
        date += timedelta(days=1)

def getjstime(d):
    return calendar.timegm(d.timetuple()) * 1000

startdate = datetime.strptime(opts.startdate, '%Y-%m-%d').date()
enddate = datetime.strptime(opts.enddate, '%Y-%m-%d').date()

w = csv.writer(sys.stdout, dialect='excel-tab')
w.writerow(('date', 'channel', 'c'))

j = {}

for date in daterange(startdate, enddate):
    infile = os.path.join(getconfig().minidump_storage_path, str(date.year),
                          date.strftime('%m-%d'), 'daily-summary.csv')

    if not os.path.exists(infile):
        continue

    fd = open(infile)
    r = csv.reader(fd, dialect='excel-tab')

    cdata = {}

    for channel, duration, count in r:
        count = int(count)
        if not channel in cdata:
            cdata[channel] = count
        else:
Example #14
from logger import logger
from testlink_data import get_project
from testlink_data import get_testcase
from testlink_data import get_testplan
from testlink_data import get_testplan_build
from testlink_data import get_testcase_exec_result
from testlink_data import get_custom_fields
from bugzilla_data import *
from gerrit_data import get_project_count
from config import getconfig

reload(sys)
sys.setdefaultencoding('utf-8')

# connect to the database
mongodb_ip = getconfig("mongodb", "IP")
mongodb_port = int(getconfig("mongodb", "port"))
client = MongoClient(mongodb_ip, mongodb_port)
dbs = getconfig("mongodb", "dbs")
database = client[dbs]


def all_project_bug():
    """
    Fetch the bug info of every project and store the data in mongodb.
    Return: None
    """
    collection_all_project_bug = database['all_project_bug']
    all_project_bug_url = getconfig("bugzilla", "all_project_bug_url")
    # Call bug_info from bugzilla_data to dump every project's bug info into a .csv file
    bug_info(all_project_bug_url)
Example #15
################################################################
#                                                              #
############ Multi-browser, multi-platform testing ############
#                                                              #
################################################################


import  time
from selenium import  webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
import  config
import  threading

# parameterize the script with host and browser
for host,browser in config.getconfig().items():
	print host
	print browser
	driver=webdriver.Remote(
		command_executor=host,
		desired_capabilities={
			'platform':'ANY',
			'browserName':browser,
			'version':'',
			'javascriptEnabled':True
		}
	)
	driver.get('http://www.baidu.com')
	driver.maximize_window()
	driver.implicitly_wait(30)
	driver.find_element_by_id('kw').send_keys('selenium grid2')
Example #16
import argparse
import os
import time
import torch
import datetime
import torch.optim as optim
import numpy as np
import math
import lib.toy_data as toy_data
import lib.utils as utils
from lib.utils import count_parameters
# from src.plotter import plot_1d
from src.OTFlowProblem import *
import config

cf = config.getconfig()

if cf.gpu: # if gpu on platform
    def_viz_freq = 100
    def_batch    = 4096
    def_niter    = 1500
else:  # if no gpu on platform, assume debugging on a local cpu
    def_viz_freq = 100
    def_batch    = 2048
    def_niter    = 1000

parser = argparse.ArgumentParser('OT-Flow')
parser.add_argument(
    '--data', choices=['swissroll', '8gaussians', 'pinwheel', 'circles', 'moons', '2spirals', 'checkerboard', 'rings'],
    type=str, default='8gaussians'
)
Example #17
#!/usr/bin/env python

from __future__ import print_function

import os, markdown, shutil

from bs4 import BeautifulSoup
from gittools import githead, gitorigin, githistory
from config import getconfig  # from config.py: feed it the ini_file string and it returns a config with named keys, e.g. config["your_config_parameter"]

ini_file = "general.ini"

config = getconfig(ini_file)
backup_config = config  # this is so we can reload the config file and still have access to the original values if they're absent from the other ini files

md_items = os.listdir(config["document_folder"])
for md_item in md_items:
    if md_item.lower().endswith('.md'):
        input_md = md_item

md = markdown.Markdown(output_format="html5")

input_md = config["document_folder"] + input_md
loc_input_md = input_md
input_md = open(input_md, 'r').read()
input_md = md.convert(input_md)
sanitise_md = str(input_md).replace(
    ">\n", ">"
)  # strip the newline after each closing tag so the extra sibling text nodes can be ignored
soup = BeautifulSoup(sanitise_md, 'html5lib')
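The import comment above describes getconfig(ini_file) as returning a config with named keys; a hypothetical reconstruction along those lines, built on the standard library configparser (Python 3 naming) — the real config.py may well differ:

import configparser

def getconfig(ini_file):
    # Flatten every section of the .ini file into one dict of named keys.
    parser = configparser.ConfigParser()
    parser.read(ini_file)
    config = {}
    for section in parser.sections():
        config.update(parser.items(section))
    return config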
Example #18
        for (key, value) in theform.items():
            if hasattr(value, 'file'):
                del theform[key]

        theform['submitted_timestamp'] = t.isoformat()

        crashid = makeuuid(t)
        dumpdir = os.path.join(config.minidump_storage_path, str(t.year),
                               t.strftime('%m-%d'), crashid)
        queueitempath = os.path.join(config.processor_queue_path, crashid)

        try:
            self.writefiles(dumpdir, dumpmap, theform)
            os.symlink(dumpdir, queueitempath)
        except:
            shutil.rmtree(dumpdir, ignore_errors=True)
            raise

        return "CrashID=bp-%s" % crashid

if __name__ == '__main__':
    config = getconfig()
    app = Collector(config)
    cherryconfig = {'global': {'server.socket_host': config.collector_addr,
                               'server.socket_port': config.collector_port,
                               'engine.autoreload.on': False,
                               'log.screen': False,
                               'log.access_file': config.collector_access_log,
                               'log.error_file': config.collector_error_log}}
    cherrypy.quickstart(app, config=cherryconfig)
Example #19
      if o == '-c':
         # alternative configuration file
         configfile=a
      if o == '-s':
         silent = True
      if o == '-h':
         usage()

   if len(cmds) == 0 \
      or cmds[0] not in ('info', 'run', 'dryrun', 'undo', 'dryundo', 'change'):
      usage()

   if 'info' in cmds: silent = True
   
   # retrieve the configuration
   conf = config.getconfig(configfile, silent=silent)
   # retrieve the task list
   todo = prep.todolist(conf, silent=silent) 

   if cmds[0] == 'run':
      jobs.dothejob(todo, action='do', silent=silent) 


   if cmds[0] == 'dryrun':
      jobs.dothejob(todo, action='do', dryrun=True,  silent=silent)


   if cmds[0] == 'undo':
      jobs.dothejob(todo, action='undo', silent=silent)

Example #20
                os.unlink(linkpath)
                continue

            try:
                self.process(dumpdir)
            except KeyboardInterrupt:
                raise
            except Exception as e:
                print "[%s] Error while processing dump '%s'. Skipping.: %s" % (time.asctime(), dumpdir, e)
                traceback.print_exc(6)
                continue

            os.unlink(linkpath)

    def loop(self):
        lasttime = 0
        while True:
            if time.time() < lasttime + self.config.processor_wakeinterval:
                time.sleep(lasttime + self.config.processor_wakeinterval - time.time())
            lasttime = time.time()
            try:
                self.searchandprocess()
            except KeyboardInterrupt:
                raise
            except Exception as e:
                print "[%s] Continuing after exception: %s" % (time.asctime(), e)

if __name__ == '__main__':
    from config import getconfig
    Processor(getconfig()).loop()
Example #21
 def __init__(self, config=getconfig()):
     self.config = config
Example #22
#!/usr/bin/env python

from __future__ import print_function

import os, markdown, shutil

from bs4 import BeautifulSoup
from gittools import githead, gitorigin, githistory
from config import getconfig # from config.py: feed it the ini_file string and it returns a config with named keys, e.g. config[ "your_config_parameter" ]

ini_file = "general.ini"

config = getconfig( ini_file )
backup_config = config # this is so we can reload the config file and still have access to the original values if they're absent from the other ini files

md_items = os.listdir( config[ "document_folder" ] )
for md_item in md_items:
    if md_item.lower().endswith( '.md' ):
        input_md = md_item

md = markdown.Markdown( output_format = "html5" )

input_md = config[ "document_folder" ] + input_md
loc_input_md = input_md
input_md = open( input_md, 'r' ).read()
input_md = md.convert( input_md )
sanitise_md = str( input_md ).replace( ">\n", ">" ) # strip the newline after each closing tag so the extra sibling text nodes can be ignored
soup = BeautifulSoup( sanitise_md, 'html5lib' )

for html_elem in soup.find_all( 'h1' ):
    title_entity = html_elem.text
Example #23
from get_mongo_data import feature_complicate
from get_mongo_data import bug_handing
from get_mongo_data import patch_statistics
from get_mongo_data import home_feature_bug_patch_data
from get_mongo_data import home_case_data
from get_mongo_data import time_data

from helper import week_get
from helper import time_derivate
import datetime

from get_project_case_info import proj_case_info

app = Flask(__name__)
api = Api(app)
file_path = getconfig("document", "file_path")


class DocumentCont(Resource):
    """获取文档excel中某个sheet的内容"""
    def get(self):
        project_name = request.args.get('project_name')
        if not project_name:
            logger("INFO", "It have not project_name argument")
            return {"code": 1, "message": "project_name argument is not exist"}
        logger("INFO", "start to get document by restful api")
        logger("INFO", "project name is %s" % project_name)
        project = ProjectInfo()
        project_id = project.get_projectid(project_name)
        if project_id is None:
            logger("WARN", "project %s is not exist!" % project_name)
Example #24
 def __init__(self, notifier, mediamanager, config=getconfig()):
     self.notifier = notifier
     self.mediamanager = mediamanager
     self.dictionaries = dictionary.PinyinDictionary.loadall()
     self.config = config
Example #25
def testConfig():
    conf = getconfig('config.ini', 'api')
    return 'Test Config.ini %s ' % (conf['port'])
Example #26
from config import getconfig
from ConfigParser import NoOptionError
from logger import logger
import uniout
from helper import time_derivate
from transfer_project_name import define_project_name
from transfer_project_name import find_project
from transfer_project_name import find_gerrit_project

reload(sys)
import re

sys.setdefaultencoding('utf-8')

# get mongodb settings: IP, port, databases
databases = getconfig("mongodb", "dbs")
IP = getconfig("mongodb", "IP")
port = int(getconfig("mongodb", "port"))

client = MongoClient(IP, port)
database = client[databases]


def bug_query(project_name, time):
    """
    获取所有项目的bug列表
    :param project_name: i-stack;食品溯源联动
    :param time: 2018-08-17
    :return: list
    """
    collection_bug = database['all_project_bug']
Example #27
 def __init__(self, notifier, mediamanager, config=getconfig()):
     self.notifier = notifier
     self.mediamanager = mediamanager
     self.config = config
Example #28
def patch_statistics(project_name, time):
    """Get the number of patches for a project at a given point in time.

    :param project_name: project name
    :param time: sampling time
    :return: int, number of patches
    """
    logger("INFO", "project is %s, time is %s" % (project_name, time))
    # translate the project name into its gerrit project name
    project_name = find_gerrit_project(project_name)
    # mongodb gerrit collection
    collection = database.gerrit_patch_count
    # sampling time of the data
    time_pattern = re.compile(time + r'.*')
    # running total of patches
    patch_count = 0
    # gerrit repositories belonging to the project
    git_repo = []
    try:
        git_repo = getconfig("gerrit", project_name).split(",")
    except NoOptionError:
        git_repo.append(project_name)
        logger("WARN",
               "git repository about %s is not in config.ini" % project_name)
    logger("INFO", "git repository about %s is %s" % (project_name, git_repo))
    # choose the mongodb query conditions per project and fetch the results
    for i in git_repo:
        if project_name == 'istack-pike':
            query_all = [{
                "$unwind": "$Resource"
            }, {
                "$match": {
                    "Resource.project": i,
                    "Resource.branch": {
                        '$regex': "pike-core"
                    },
                    "Time": time_pattern
                }
            }]
        elif project_name == 'istack-liberty':
            query_all = [{
                "$unwind": "$Resource"
            }, {
                "$match": {
                    "Resource.project": i,
                    "Resource.branch": "liberty-core",
                    "Time": time_pattern
                }
            }]
        else:
            project_pattern = re.compile(i + r'.*')
            query_all = [{
                "$unwind": "$Resource"
            }, {
                "$match": {
                    "Resource.project": project_pattern,
                    "Time": time_pattern
                }
            }]
        cursor = collection.aggregate(query_all)
        for doc in cursor:
            count = doc["Resource"]["count"]
            logger("INFO", "%s, patch count is %d" % (doc, count))
            patch_count = patch_count + count
    logger("INFO", "the total amount of patch is %d" % patch_count)
    return patch_count
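For orientation, the pipeline above implies documents in gerrit_patch_count shaped roughly like the record below: $unwind emits one document per Resource entry, $match filters on project, branch and the Time regex, and the loop sums Resource.count. The field values here are invented for illustration only:

sample_record = {
    "Time": "2018-08-17 10:00:00",
    "Resource": [
        {"project": "istack/nova", "branch": "pike-core", "count": 12},
        {"project": "istack/nova", "branch": "liberty-core", "count": 7},
    ],
}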
Example #29
 def __init__(self, config=getconfig()):
     self.config = config
Example #30
# @addtogroup server
# @brief This is server component
# @{
# @addtogroup server
# @brief This is server module
# @{
##
import sys
import time
import pymysql
from testlink import TestlinkAPIClient
from config import getconfig
reload(sys)
sys.setdefaultencoding('utf-8')

testlink_url = getconfig("testlink", "testlink_url")
ip = testlink_url.split("/")[2]
TLURL = testlink_url + "/lib/api/xmlrpc/v1/xmlrpc.php"
DEVKey = getconfig("testlink", "DEVKey")
test_user_name = getconfig("testlink", "test_user_name")
tls = TestlinkAPIClient(TLURL, DEVKey)
nowtime = time.strftime("%Y-%m-%d")


def getsql(sql):
    """
    根据项目名称和文件夹名称查找文件夹对应的
    此方法只能查找到根目录下的文件夹的id,多级目录无法查找
    :param sql: sql语句
    :return:查询结果
    """
Example #31
__author__ = 'Eggsy'
__date__ = '2019/4/23 13:36'

import sys
sys.path.append("../lib/")
sys.path.append("../restful/")
from transfer_project_name import find_project
from pymongo import MongoClient
from config import getconfig
from logger import logger
import pymysql
reload(sys)
sys.setdefaultencoding('utf-8')

# get mongodb settings: IP, port, databases
databases = getconfig("mongodb", "dbs")
# database IP and port
IP = getconfig("mongodb", "IP")
# IP = "10.200.43.160"
port = int(getconfig("mongodb", "port"))
# testlink IP
testlink_ip = getconfig("testlink", "testlink_url").split("/")[2]


# connect to the mongodb database
def connect_Mongo():
    client = MongoClient(IP, port)
    dblist = client.list_database_names()
    if databases in dblist:
        # print u"the database exists"
        database = client[databases]