Example #1
def do_child(runname, debug=False):
    """
	Run a test inside the child process.

	@param runname:	Name of muFAT run
	@param debug:	Whether to actually store results
	"""

    # load and execute tests
    from .mvrt import Core
    from runpy import run_path

    # pre-clean data folders
    if os.path.isdir(Core.UserDataFolder):
        print "Cleaning folder:", Core.UserDataFolder
        shutil.rmtree(Core.UserDataFolder)

    path = normalize(
        os.path.join(r"Y:\mufat\testruns\regressionpaths", runname))
    sys.path.append(os.path.dirname(path))
    results = run_path(path, run_name="__main__")

    # return results to parent for processing
    if not debug:
        q = RedisQueue("_".join(["Q", DBKEY, HOST]))
        q.put({
            'pass': results["passed"],
            'fail': results["failed"],
            'untested': results["skipped"],
            'summary': results["logfile"],
            'shutdown': True,
            'crash': results["skipped"],
            'retained_samples': [],
            'return_code': 0,
            'timeout': False,
            'svn_rev': Core.GetRuntimeSpecialBuild()
        })

    Core.Release()
    sys.exit(0)
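
Nothing in these snippets shows the RedisQueue class itself. Examples #1 and #6 treat it as a drop-in for Python's Queue API: put() on the child side, get_nowait() plus a RedisQueue.Empty exception on the parent side. The following is a minimal sketch consistent with those calls, assuming JSON serialization over a Redis list; the class body and connection defaults are guesses, not the project's actual implementation.

import json
import redis

class RedisQueue(object):
    """Hypothetical Queue-like wrapper over a Redis list."""

    class Empty(Exception):
        """Raised when a non-blocking get finds the queue empty."""

    def __init__(self, name, conn=None):
        self.name = name
        self.conn = conn or redis.StrictRedis()  # assumed default connection

    def put(self, item):
        # serialize and append to the tail of the list
        self.conn.rpush(self.name, json.dumps(item))

    def get_nowait(self):
        # pop from the head; raise Empty instead of blocking
        raw = self.conn.lpop(self.name)
        if raw is None:
            raise self.Empty()
        return json.loads(raw)
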
Example #2
                continue
    return result


if __name__ == '__main__':
    strNum = ''
    RESERVER_NUM = 1
    ROBOT_NUM = 2

    robotNumber = []
    luckNumber = []
    norNumber = []

    result = False

    lucknum = RedisQueue("HeyDo_LuckNum")
    robotnum = RedisQueue("HeyDo_RobotNumBlock")
    nornum = RedisQueue("HeyDo_NorNumBlock")

    if not result:
        lucknum.del_key()
        robotnum.del_key()
        nornum.del_key()

    f = open("./userid.txt", "w+")
    for n in range(2000404, 2030000):
        strNum = str(n)
        result = checkLuckNumber(strNum)
        if result:
            luckNumber.append(strNum)
            lucknum.push(strNum)
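
Example #2 talks to RedisQueue through a different surface: push() to enqueue raw values and del_key() to reset the queue before a run. A sketch consistent with those two calls, again assuming a plain Redis list underneath (the method bodies are assumptions):

import redis

class RedisQueue(object):
    """Hypothetical push/del_key variant over a Redis list."""

    def __init__(self, name, conn=None):
        self.name = name
        self.conn = conn or redis.StrictRedis()  # assumed default connection

    def push(self, value):
        # append a raw value to the tail of the list
        self.conn.rpush(self.name, value)

    def del_key(self):
        # drop the whole list so the next run starts empty
        self.conn.delete(self.name)
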
Example #3
import json

from flask import Flask
from flask import request

from index.SelasticSearch import SelasticSeacrh
from index.settings import DOCMAP_FILE, INDEX_LOCK_QUEUE
from queue import RedisQueue

app = Flask(__name__)
queue = RedisQueue(INDEX_LOCK_QUEUE)
CACHE = {}
REVERSE_INDEX = SelasticSeacrh.MERGE_SEGMENTS()
doc_memorymap = json.load(open(DOCMAP_FILE, 'r'))


@app.route('/')
def hello_world():
    return 'Selasticsearch is up now'


@app.route('/search')
def search():
    q = request.args['q']
    cache_q = q
    field = request.args.get('field', '')
    q = map(lambda x: x.lower(), q.split())
    search = SelasticSeacrh([], REVERSE_INDEX, doc_memorymap)
    if len(q) > 1:
        if cache_q in CACHE:
Example #4
import time
import run
import json
from queue import RedisQueue
from config import r

trend_q = RedisQueue('trending', r)
trend_store = 'trending_store'

while True:
    print 'checking work queue'
    message = trend_q.dequeue()
    print message
    # get trending players
    if message[1] == 'GET TRENDING PLAYERS':
        results = run.trending_players()
        print results
        r.set(trend_store, json.dumps(results))
    time.sleep(2)
Example #5
import run
import os
import time
from queue import RedisQueue

redis_url = os.getenv('REDISTOGO_URL')
scraper_q = RedisQueue('scrapers', redis_url)

# Listens for message 'CRAWL ALL SPIDERS' on Redis queue at key 'scrapers'

while True:
    print 'checking work queue'
    message = scraper_q.dequeue()
    print message
    if message[1] == 'CRAWL ALL SPIDERS':
        run.crawl_all()
        print 'DONE CRAWLING'
    if message[1] == 'PRINT DIRS':
        run.print_dirs()
    time.sleep(2)
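
The workers in Examples #4 and #5 construct RedisQueue from a name plus either a connection object or a URL, and they index the dequeued message with message[1]. That matches the (key, value) pair returned by Redis BLPOP, so a plausible sketch of dequeue() is a thin BLPOP wrapper; the URL handling here is an assumption:

import redis

class RedisQueue(object):
    """Hypothetical blocking-worker variant built on BLPOP."""

    def __init__(self, name, conn):
        self.name = name
        # accept either an existing client or a redis:// URL (assumption)
        if isinstance(conn, redis.StrictRedis):
            self.conn = conn
        else:
            self.conn = redis.StrictRedis.from_url(conn)

    def dequeue(self, timeout=0):
        # BLPOP blocks until an item arrives and returns (key, value),
        # which is why the callers above read message[1]
        return self.conn.blpop(self.name, timeout=timeout)
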
Example #6
def main(suites_or_runs, debug=False):
	"""
	Runs a list of suites of runs inside the parent process.

	:param suites_or_runs: A list of suite names or run names to be executed
	:param debug: Whether to actually store results
	"""

	suites = {}
	# argument is just a single string
	if isinstance(suites_or_runs, basestring):
		suites_or_runs = [suites_or_runs]
	for arg in suites_or_runs:
		if os.path.splitext(arg)[1] != ".py":
			if arg not in suites:
				suites[arg] = set()
			suites[arg].update(load_suite(arg))
		else:
			if not suites.has_key("mac"):
				suites["mac"] = set()
			suites["mac"].add(arg)

	global DBKEY
	DBKEY = DBKEY or time.strftime("%Y-%m-%d,%H-%M-%S")

	# cleanup/create necessary folders
	if not os.path.exists(MUVEEDEBUG):
		os.makedirs(MUVEEDEBUG)
	cachedir = os.path.expanduser("~/Library/Application Support/muvee Technologies/071203")
	if os.path.exists(cachedir):
		shutil.rmtree(cachedir)

	# python command to launch the child process
	svn_rev = 0
	cmd = [sys.executable, "-u", "-m", "muvee.runner"]
	if sys.platform == "darwin":
		cmd = ["arch -i386"] + cmd

	# prepare to upload logfiles to Amazon S3
	new_key = boto.connect_s3(AWS_ACCESS_KEY, AWS_SECRET_KEY) \
			.get_bucket("mufat").new_key
	subkey = time.strftime("%Y-%m-%d_%H_%M", time.strptime(DBKEY, "%Y-%m-%d,%H-%M-%S"))

	global PRINT_OUTPUT
	for suite, runs in suites.iteritems():
		for run in runs:
			shortname = os.path.splitext(os.path.basename(run))[0]
			start = time.time()
			logfile = os.path.join(MUVEEDEBUG, "(%s)%s_Log.txt" % \
					(time.strftime("%Y%m%d%H%M%S", time.localtime(start)), shortname))

			# run child process
			print "Starting muFAT process for %s (%s)." % (run, suite)
			p = subprocess.Popen(" ".join(cmd + ['"' + run + '"', "--child", "--key", DBKEY]),
								shell=True,
								stdout=subprocess.PIPE,
								stderr=subprocess.STDOUT)

			# start a watchdog to kill the child if it takes too long
			Watchdog(p, 3600).start()

			# collect output text for processing later
			with codecs.open(logfile, "w+", "utf-8") as f:
				while True:
					line = unicode(p.stdout.readline(), errors="replace")
					if not line:
						break
					f.write(line)
					if PRINT_OUTPUT:
						print line.encode("ascii", errors="replace"),

			# block until process completes and record running time
			p.communicate()
			minutes, seconds = divmod(time.time() - start, 60)
			hours, minutes = divmod(minutes, 60)

			# read results from child
			try:
				q = RedisQueue("_".join(["Q", DBKEY, HOST]))
				result = q.get_nowait()
			except RedisQueue.Empty:
				# no results - child probably died?
				result = {
					'pass': 0,
					'fail': 0,
					'untested': 0,
					'summary': '',
					'shutdown': False,
					'crash': True,
					'retained_samples': [],
					'return_code': -1,
					'timeout': False
				}

			# parse log file for assertions and upload to Amazon S3
			try:
				with open(logfile, "r") as f:
					asserts, assertdict = get_asserts(f.read())
					result.update({
						'assert': asserts,
						'unique_asserts': assertdict,
						'time': (hours, minutes, seconds)
					})
					key = new_key("%s/%s/(%s)%s_Log.txt" % (HOST.upper(), subkey, \
						time.strftime("%Y%m%d%H%M%S", time.localtime(start)), shortname))
					key.set_contents_from_file(f, reduced_redundancy=True, rewind=True)
					key.make_public()
					result["log"] = "https://mufat.s3.amazonaws.com/" + key.key
			finally:
				os.remove(logfile)

			# upload summary file to Amazon S3
			if result.get("summary"):
				summary = result["summary"]
				try:
					key = new_key("%s/%s/%s" % (HOST.upper(), subkey, shortname + ".txt"))
					key.set_contents_from_filename(summary, reduced_redundancy=True)
					key.make_public()
					result["summary"] = "https://mufat.s3.amazonaws.com/" + key.key
				finally:
					os.remove(summary)

			if result.get("svn_rev"):
				svn_rev = result["svn_rev"]

			if not debug:
				print "Uploading intermediate results..."
				r = requests.post(SERVER_URL + "%s/%s/%s/submit" % (DB, DBKEY, HOST), {
					'suite': suite,
					'runname': run,
					'results': json.dumps(result)
				}, headers={ "X-NO-LOGIN": "******" })
				r.raise_for_status()
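
Example #6 also depends on a Watchdog(p, 3600).start() helper that is not shown. A minimal sketch, assuming it is nothing more than a daemon thread that polls the child process and kills it once the timeout elapses; the real class may differ:

import threading
import time

class Watchdog(threading.Thread):
    """Hypothetical timer thread that kills a subprocess after a timeout."""

    def __init__(self, process, timeout):
        threading.Thread.__init__(self)
        self.daemon = True  # do not keep the parent alive after main exits
        self.process = process
        self.timeout = timeout

    def run(self):
        deadline = time.time() + self.timeout
        while time.time() < deadline:
            if self.process.poll() is not None:
                return  # child exited on its own
            time.sleep(1)
        self.process.kill()  # timed out: terminate the child
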