Example #1
 # (presumably decorated with @gen.coroutine or @gen.engine in the original handler)
 def delete(self):
     if self.get_argument("user") and self.get_argument("star"):
         user = self.get_argument("user")
         star = self.get_argument("star")
         if '0' <= star[0] <= '9':
             u1 = {"user":self.current_user, "isStar":0}
             u2 = {"user":self.current_user, "isStar":1}
             yield db.users.update({'user': user}, {'$pull':{"friends":u1}})
             yield db.users.update({'user': user}, {'$pull':{"friends":u2}})
             u = {"user":user, "isStar": int(star)}
             result = yield db.users.update({'user': self.current_user}, {'$pull':{"friends":u}})
             util.log( "MyFriend.delete" + str(result))
             if "nModified" in result and result["nModified"] == 1:
                 if "writeConcernError" not in result:
                     result = {"result": "删除好友成功"}  # "friend removed successfully"
                     util.write(self, 1, "", result)
                     self.finish()
                 else:
                     util.log(result["writeConcernError"])
                     util.write(self, 0, "删除好友请求失败,请重试", {})  # "failed to remove friend, please retry"
                     self.finish()
             else:
                 #util.log( result["writeError"])
                 util.write(self, 0, "删除好友请求失败,请重试", {})  # "failed to remove friend, please retry"
                 self.finish()
         else:
             util.errorHandle(self, 0)
             self.finish()
     else:
         util.errorHandle(self, 0)
         self.finish()
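A hedged sketch of the same removal against a current Motor/PyMongo API, where update_one() returns a pymongo.results.UpdateResult instead of the raw getlasterror-style dict checked above (the function and argument names here are illustrative):

async def remove_friend(db, current_user, user, star):
    # Modern Motor: update_one() returns an UpdateResult, so the
    # "nModified"/"writeConcernError" dict checks become attribute access.
    doc = {"user": user, "isStar": int(star)}
    result = await db.users.update_one({'user': current_user},
                                       {'$pull': {'friends': doc}})
    return result.modified_count == 1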
Example #2
 def setUp(self):
     util.set_process_logfile_prefix('TestBasicOp_%s' % self._testMethodName)
     ret = default_cluster.initialize_starting_up_smr_before_redis(self.cluster)
     if ret != 0:
         util.log('failed to test_basic_op.initialize')
         default_cluster.finalize(self.cluster)
     self.assertEqual(ret, 0, 'failed to test_basic_op.initialize')
Example #3
    def __init__(self, methanalysis):
        self.method = methanalysis.get_method()
        self.metha = methanalysis
        self.name = self.method.get_name()
        self.lparams = []
        self.basic_blocks = list(methanalysis.basic_blocks.get())
        self.var_to_name = {}
        self.writer = None

        access = self.method.get_access_flags()
        self.access = [flag for flag in util.ACCESS_FLAGS_METHODS
                       if flag & access]
        desc = self.method.get_descriptor()
        self.type = util.get_type(desc.split(')')[-1])
        self.params_type = util.get_params_type(desc)

        self.exceptions = methanalysis.exceptions.exceptions

        code = self.method.get_code()
        if code is None:
            util.log('No code : %s %s' % (self.name,
                                          self.method.get_class_name()),
                     'debug')
        else:
            start = code.registers_size - code.ins_size
            if 0x8 not in self.access:  # 0x8 == ACC_STATIC: non-static methods get a `this` param
                self.var_to_name[start] = ThisParam(start, self.name)
                self.lparams.append(start)
                start += 1
            num_param = 0
            for ptype in self.params_type:
                param = start + num_param
                self.lparams.append(param)
                self.var_to_name.setdefault(param, Param(param, ptype))
                num_param += util.get_type_size(ptype)
Example #4
 # (presumably decorated with @gen.coroutine or @gen.engine in the original handler)
 def confirmAdd(self):
     if self.get_argument("user") and self.get_argument("id"):
         user = self.get_argument("user")
         re = {"user":self.current_user, "isStar":0}
         result = yield db.users.update({'user': user}, {'$push':{"friends":re}})
         re = {"user":user, "isStar":0}
         result = yield db.users.update({'user': self.current_user}, {'$push':{"friends":re}})
         yield motor.Op(msg_handle.ReadMsg, self.current_user, self.get_argument("id"))
         util.log( "MyFriend.confirmAdd" + str(result))
         if "nModified" in result and result["nModified"] == 1:
             if "writeConcernError" not in result:
                 result = {"result": "添加好友成功"}  # "friend added successfully"
                 util.write(self, 1, "", result)
                 self.finish()
             else:
                 util.log(result["writeConcernError"])
                 util.write(self, 0, "添加好友请求失败,请重试", {})  # "failed to add friend, please retry"
                 self.finish()
         else:
             #util.log( result["writeError"])
             util.write(self, 0, "添加好友请求失败,请重试", {})  # "failed to add friend, please retry"
             self.finish()
     else:
         util.errorHandle(self, 0)
         self.finish()
Example #5
def TESTRUNNER(tests=None):
    directory = "%s.%s" % sys.version_info[:2]
    version = "%s.%s.%s" % sys.version_info[:3]
    with open(os.path.join(directory, "version")) as f:
        preferred_version = f.read().strip()
    if preferred_version != version:
        util.log(
            "WARNING: The tests in %s/ are from version %s and your Python is %s", directory, preferred_version, version
        )

    env = os.environ.copy()
    env["PYTHONPATH"] = os.getcwd() + ":" + os.environ.get("PYTHONPATH", "")

    for filename in glob.glob("%s/@test_*_tmp" % directory):
        os.unlink(filename)

    if not tests:
        tests = sorted(glob.glob("%s/test_*.py" % directory))

    tests = [os.path.basename(x) for x in tests]
    options = {"cwd": directory, "env": env}

    for filename in tests:
        yield directory + "/" + filename, [sys.executable, "-u", "-m", "monkey_test", filename], options
        yield directory + "/" + filename + "/Event", [
            sys.executable,
            "-u",
            "-m",
            "monkey_test",
            "--Event",
            filename,
        ], options
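TESTRUNNER is a generator of (name, command, options) tuples; a minimal consumer sketch (the runner protocol is an assumption here, not shown in the snippet):

for name, cmd, options in TESTRUNNER():
    print(name, ' '.join(cmd), 'cwd=%s' % options['cwd'])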
Example #6
def get_seat_query_code():
    """res[0]: 1 -> need relogin; 2 -> need change proxy; 3 -> normal"""
    veri_code = ""
    res = do_get('http://toefl.etest.net.cn/cn/CityAdminTable')
    if res[0] == 2:
        return (2, veri_code)
    html = res[1]
    pattern = re.compile(r'(.*src=\")(.*)(\.VerifyCode2\.jpg)(\".*)')
    res = re.findall(pattern, html)
    if res:
        res = do_get('http://toefl.etest.net.cn' + res[0][1] + res[0][2])
        if res[0] == 2:
            return (2, veri_code)
        with open('seatQuery.jpg', 'wb') as f:
            f.write(res[1])
        im = Image.open('seatQuery.jpg')
        veri_code = image_to_string(im)
        veri_code = veri_code.replace('\n', '')
        veri_code = veri_code.replace(' ', '')
        util.log('seat text: ' + veri_code)
    else:
        util.log("bad seat query page")
        return (1, veri_code)
    return (3, veri_code)
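A usage sketch dispatching on the documented status codes (1 -> need relogin, 2 -> need change proxy, 3 -> normal); only util.log from the surrounding module is assumed:

status, code = get_seat_query_code()
if status == 1:
    util.log('session expired, re-login required')
elif status == 2:
    util.log('proxy blocked, rotate to a fresh proxy')
else:
    util.log('verify code ready: ' + code)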
Example #7
def truncate(data, length):
  if len(data) > length:
    log("TEXT CUT AT %s CHARACTERS" % length)
    data = data[0:length] + " ... \n`(Truncated at " + str(length) + " characters)`"
    return data
  else:
    return data
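For instance, truncate() passes short input through unchanged and annotates anything over the limit:

print(truncate("a" * 10, 20))   # unchanged: 'aaaaaaaaaa'
print(truncate("a" * 50, 20))   # first 20 chars plus the '(Truncated at 20 characters)' note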
Example #8
 def __init__(self, name):
     Singleton.__init__(self)
     self.path = ProfileHelper.get_path() + str(name) + '.json'
     self.name = name
     if os.path.isfile(self.path):
         with open(self.path, 'rb') as fl:
             data = fl.read()
         inst = ToxES.get_instance()
         try:
             if inst.is_data_encrypted(data):
                 data = inst.pass_decrypt(data)
             info = json.loads(str(data, 'utf-8'))
         except Exception as ex:
             info = Settings.get_default_settings()
             log('Parsing settings error: ' + str(ex))
         super(Settings, self).__init__(info)
         self.upgrade()
     else:
         super(Settings, self).__init__(Settings.get_default_settings())
         self.save()
     smileys.SmileyLoader(self)
     self.locked = False
     self.closing = False
     self.unlockScreen = False
     p = pyaudio.PyAudio()
     input_devices = output_devices = 0
     for i in range(p.get_device_count()):
         device = p.get_device_info_by_index(i)
         if device["maxInputChannels"]:
             input_devices += 1
         if device["maxOutputChannels"]:
             output_devices += 1
     self.audio = {'input': p.get_default_input_device_info()['index'] if input_devices else -1,
                   'output': p.get_default_output_device_info()['index'] if output_devices else -1,
                   'enabled': input_devices and output_devices}
Example #9
 def start(self):
   try:
     while(True):
       i = self.getInput()
       self.process(i)
   except KeyboardInterrupt:
     util.log('\nShutting down...', 'INFO')
Example #10
    def assertEqualResults(self, real_result, gevent_result, func):
        errors = (socket.gaierror, socket.herror, TypeError)
        if isinstance(real_result, errors) and isinstance(gevent_result, errors):
            if type(real_result) is not type(gevent_result):
                log('WARNING: error type mismatch: %r (gevent) != %r (stdlib)', gevent_result, real_result)
            return

        real_result = self._normalize_result(real_result, func)
        gevent_result = self._normalize_result(gevent_result, func)

        real_result_repr = repr(real_result)
        gevent_result_repr = repr(gevent_result)
        if real_result_repr == gevent_result_repr:
            return
        if relaxed_is_equal(gevent_result, real_result):
            return

        # If we're using the ares resolver, allow the real resolver to generate an
        # error that the ares resolver actually gets an answer to.

        if (RESOLVER_IS_ARES
            and isinstance(real_result, errors)
            and not isinstance(gevent_result, errors)):
            return

        # From 2.7 on, assertEqual does a better job highlighting the results than we would
        # because it calls assertSequenceEqual, which highlights the exact
        # difference in the tuple
        self.assertEqual(real_result, gevent_result)
Example #11
    def startProcess(self, movies, length):
        stats = {"added": 0, "updated": 0, "removed": 0}

        for count, movie in enumerate(movies):
            if util.abortRequested() or (not HIDE_TOP250 and self.progress.iscanceled()):
                break

            result = self.checkMovie(movie)
            if result is not None:
                stats[result] += 1

            if not HIDE_TOP250:
                self.progress.update((count * 100) // length, "%s %s" % (l("Searching_for"), movie["label"]))
        else:  # for-else: runs only when the loop completed without a break
            util.writeDate("top250")
            self.createMissingCSV()
            if OPEN_MISSING:
                util.openFile("missingTop250.csv")

        stats["missing"] = len(self.top250)
        util.log("Movies IMDb Top250 summary: updated: %(updated)s, added: %(added)s, removed: %(removed)s, missing: %(missing)s" % stats)

        if HIDE_TOP250:
            util.notification("%s %s" % (l("Completed"), l("Top250")))
        else:
            util.dialogOk(l("Completed"), l("Movies_IMDb_Top250_summary"), "%s %s" % (stats["updated"], l("were_updated")), "%s %s %s %s" % (stats["added"], l("were_added_and"), stats["removed"], l("were_removed!")))
Example #12
 def setUp(self):
     util.set_process_logfile_prefix('TestScaleout_%s' % self._testMethodName)
     conf = {'smr_log_delete_delay':60}
     if default_cluster.initialize_starting_up_smr_before_redis(self.cluster, conf=conf) != 0:
         util.log('failed to TestScaleout.initialize')
         return -1
     return 0
Example #13
def checkLastState(mgmt_ip, mgmt_port, cluster_name, pgs_id, state):
    pgs = util.get_pgs_info_all(mgmt_ip, mgmt_port, cluster_name, pgs_id)
    util.log('PGS:%d, LastState:%s, LastTimestamp:%d' % (pgs_id, pgs['HBC']['lastState'], pgs['HBC']['lastStateTimestamp']))
    return state == pgs['HBC']['lastState']
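A polling wrapper sketch built on checkLastState(), for when a state transition is expected but not yet visible; the timeout handling is an addition, not part of the original tests:

import time

def wait_for_last_state(mgmt_ip, mgmt_port, cluster_name, pgs_id, state, timeout=30):
    deadline = time.time() + timeout
    while time.time() < deadline:
        if checkLastState(mgmt_ip, mgmt_port, cluster_name, pgs_id, state):
            return True
        time.sleep(1)
    return False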
Example #14
def log_user_exit(user, start_time, machine):
    end_time = util.datetime_as_time_string(datetime.now())
    start_time = util.datetime_as_time_string(start_time)
    name = user.name
    email = user.email
    log_message = '%s,%s,%s,%s,%s' % (start_time, end_time, machine, name, email)
    util.log(log_message, LOG_HEADER, LOG_DIRECTORY)
Example #15
def _log_pod_event(user, pod_event):
    start_time = end_time = util.datetime_as_time_string(datetime.now())
    machine = pod_event
    name = user.name
    email = user.email
    log_message = '%s,%s,%s,%s,%s' % (start_time, end_time, machine, name, email)
    util.log(log_message, LOG_HEADER, LOG_DIRECTORY)
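Both helpers emit the same five-field CSV row (start, end, machine, name, email), so a consumer can split on commas; the sample row below is hypothetical:

line = '09:00:00,17:30:00,lab-pc-07,Jane Doe,jane@example.org'  # hypothetical row
start_time, end_time, machine, name, email = line.split(',')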
Example #16
def index_single(es, network, channel, date, lines):
    # Delete existing
    delete_existing = Search(
        using=es,
        index='moffle',
    ).query(
        "term", network=network,
    ).query(
        "term", channel=channel,
    ).query(
        "term", date=date,
    )

    es.delete_by_query(
        index='moffle',
        body=delete_existing.to_dict(),
    )

    actions = [x for x in (line_to_index_action(network, channel, date, i, line) for i, line in lines) if x]
    retries = 0  # moved outside the loop so retries actually accumulate
    while actions:
        try:
            success_count, _ = bulk(es, actions)
            log("{}/{}/{}: indexed {} lines".format(network, channel, date, success_count))
            return success_count
        except Exception as e:
            retries += 1
            log("{}/{}/{}: Attempt {}/3: {}".format(network, channel, date, retries, e))
            if retries > 3:
                raise
Example #17
 def ensure_project(self, project_name,
                          pmembers=None,
                          view='public',
                          edit='public'):
     """make sure project exists
     :param project_name: str
     :param pmembers: list
     :param view: str
     :param edit str"""
     existing_proj = phabdb.get_project_phid(project_name)
     #print "EXISTING PROJ: ", existing_proj
     #print "EXISTING PROJ TYPE: ", type(existing_proj)
     #existing_proj = self.con.project.query(names=[project_name])
     if not existing_proj:
         log('need to create project(s) ' + project_name)
         try:
             new_proj = self.con.project.create(name=project_name, members=pmembers or [])
         #XXX: Bug where we have to specify a members array!
         except phabricator.APIError:
             pass
         phid = phabdb.get_project_phid(project_name)
         if not phid:
             raise Exception("Project %s does not exist still." % (project_name,))
         #existing_proj = self.con.project.query(names=[project_name])
         #log(str(existing_proj))
         #phid = existing_proj['data'][existing_proj['data'].keys()[0]]['phid']
         phabdb.set_project_policy(phid, view, edit)
     else:
         phid = existing_proj
         #phid = existing_proj['data'][existing_proj['data'].keys()[0]]['phid']
         log(project_name + ' exists')
     return phid
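A call sketch; `phab` is a hypothetical instance of the class defining ensure_project(), and the project name is illustrative:

phid = phab.ensure_project('sprint-triage', pmembers=[], view='public', edit='public')
log('project phid: ' + str(phid))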
Example #18
def main(tests=None):
    if not tests:
        tests = {os.path.basename(x) for x in glob.glob('../examples/*.py')}
        tests = sorted(tests)

    failed = []

    for filename in tests:
        if filename in ignore:
            continue
        min_time, max_time = time_ranges.get(filename, default_time_range)

        start = time.time()
        if util.run([sys.executable, '-u', filename], timeout=max_time, cwd=cwd):
            failed.append(filename)
        else:
            took = time.time() - start
            if took < min_time:
                util.log('! Failed example %s: exited too quickly, after %.1fs (expected %.1fs)', filename, took, min_time)
                failed.append(filename)

    if failed:
        util.log('! Failed examples:\n! - %s', '\n! - '.join(failed))
        sys.exit(1)

    if not tests:
        sys.exit('No tests.')
Example #19
    def __call__(self, environ, start_response):
        realmname = self._domaincontroller.getDomainRealm(environ["PATH_INFO"], environ)
        
        if not self._domaincontroller.requireAuthentication(realmname, environ):
            # no authentication needed
            _logger.debug("No authorization required for realm '%s'" % realmname)
            environ["http_authenticator.realm"] = realmname
            environ["http_authenticator.username"] = ""
            return self._application(environ, start_response)
        
        if "HTTP_AUTHORIZATION" in environ:
            authheader = environ["HTTP_AUTHORIZATION"] 
            authmatch = self._headermethod.search(authheader)          
            authmethod = "None"
            if authmatch:
                authmethod = authmatch.group(1).lower()
                
            if authmethod == "digest" and self._acceptdigest:
                return self.authDigestAuthRequest(environ, start_response)
            elif authmethod == "digest" and self._acceptbasic:
                return self.sendBasicAuthResponse(environ, start_response)
            elif authmethod == "basic" and self._acceptbasic:
                return self.authBasicAuthRequest(environ, start_response)

            util.log("HTTPAuthenticator: respond with 400 Bad request; Auth-Method: %s" % authmethod)
            
            start_response("400 Bad Request", [("Content-Length", "0"),
                                               ("Date", util.getRfc1123Time()),
                                               ])
            return [""]
        
                                   
        if self._defaultdigest:
            return self.sendDigestAuthResponse(environ, start_response)
        return self.sendBasicAuthResponse(environ, start_response)
Example #20
def _add_launch_agent(value=None, name='com.apple.update.manager'):
    try:
        global template_plist
        if sys.platform == 'darwin':
            if not value:
                if len(sys.argv):
                    value = sys.argv[0]
                elif '__file__' in globals():
                    value = globals().get('__file__')
                else:
                    raise ValueError('No target file selected')
            if value and os.path.isfile(value):
                label = name
                if not os.path.exists('/var/tmp'):
                    os.makedirs('/var/tmp')
                fpath = '/var/tmp/.{}.sh'.format(name)
                bash = template_plist.substitute(LABEL=label, FILE=value)
                with open(fpath, 'w') as fileobj:
                    fileobj.write(bash)
                bin_sh = b''.join(subprocess.Popen('/bin/sh {}'.format(fpath), bufsize=0, shell=True,
                                                   stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate())
                time.sleep(1)
                launch_agent = os.path.join(os.environ.get('HOME'), 'Library/LaunchAgents/{}.plist'.format(label))
                if os.path.isfile(launch_agent):
                    os.remove(fpath)
                    return (True, launch_agent)
                else:
                    util.log('File {} not found'.format(launch_agent))
    except Exception as e2:
        util.log('Error: {}'.format(str(e2)))
    return (False, None)
Example #21
def request_to_shutdown_cm(server):
    server_id = server['id']  # renamed from `id` to avoid shadowing the builtin

    if util.shutdown_cm(server_id) != 0:
        util.log('failed to shutdown_cm%d' % server_id)
        return -1
    return 0
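A sweep sketch over a cluster's server list; the cluster dict layout is assumed from the surrounding test utilities:

for server in cluster['servers']:
    if request_to_shutdown_cm(server) != 0:
        util.log('cluster shutdown aborted at cm%d' % server['id'])
        break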
Example #22
 def visit_loop_node(self, loop):
     follow = loop.get_loop_follow()
     if follow is None and not loop.looptype.endless():
         log('Loop has no follow!', 'error')
     if loop.looptype.pretest():
         if loop.true is follow:
             loop.neg()
             loop.true, loop.false = loop.false, loop.true
         loop.visit_cond(self)
     elif loop.looptype.posttest():
         self.latch_node.append(loop.latch)
     elif loop.looptype.endless():
         pass
     self.loop_follow.append(follow)
     if loop.looptype.pretest():
         self.visit_node(loop.true)
     else:
         self.visit_node(loop.cond)
     self.loop_follow.pop()
     if loop.looptype.pretest():
         pass
     elif loop.looptype.posttest():
         self.latch_node.pop()
         loop.latch.visit_cond(self)
     else:
         self.visit_node(loop.latch)
     if follow is not None:
         self.visit_node(follow)
Example #23
def codeFile(args, flag, data):
  PARAM_KEY = 1
  PARAM_FILE = 2  # Output file location
  PARAM_FORMATTER = 3
  ARGUMENTS = len(args) - 1
  # Ability to add a block of code through copy and paste and have it formatted correctly!
  if keyExists("files", args[PARAM_KEY]):
    _file = json.loads(load("files/" + args[PARAM_KEY]))
    out = ''

    # loadJSON 
    for x in _file:
      block = str(load("blocks/"+ x))
      if ARGUMENTS == PARAM_FORMATTER:  # Alter all the blocks in said fashion
        block = format.block(block, args[PARAM_FORMATTER])
      out += block
      out += "\n" # Adds some spacing between blocks

    # No file specified
    if len(args) < 3:
      log(out)
    else:
      log("Saving to file " + args[PARAM_FILE])
      save(args[PARAM_FILE], out)
  else:
    error("Error: File does not exist")
Example #24
    def recovery(self):
        if not os.path.exists(CHUNK_NOTE_PATH):
            log('chunk note does not exist')
            return

        with open(CHUNK_NOTE_PATH, 'r') as f:
            self._chunks = json.loads(f.read())
Example #25
 def register_device(self, device, account, workers, pgid):
     """
     See :meth:`db.DB.register_device`
     """
     doc={"device":device, "account":account, "workers": workers, "pgid":pgid}
     self.database.device.insert(doc)
     log("REGISTERED DEVICE: %s"%doc)
Example #26
 # (presumably decorated with @gen.engine in the original handler: it uses the
 # old gen.Callback/gen.Wait pattern)
 def post(self):
     token = self.xsrf_token  # accessing the property sets the XSRF cookie
     if self.get_argument("user") and self.get_argument("pwd"):
         db.users.find_one({"user":self.get_argument("user"), "password":self.get_argument("pwd")},
             callback=(yield gen.Callback("key")))
         user = yield gen.Wait("key")
         #find = False
         if user[0]:
             util.log("LoginHandler.post")
             util.log(user[0])
             if user[0][0] and user[0][0]["user"]:
                 #find = True
                 self.set_secure_cookie("fbt_user",self.get_argument("user"))
                 if self.get_argument("remember") and self.get_argument("remember") == "1":
                     self.set_cookie("fbt_u",self.get_argument("user"))
                     self.set_cookie("fbt_pwd",self.get_argument("pwd"))
                 if self.get_argument("next"):
                     self.redirect(self.get_argument("next"))
                 else:
                     result = {}
                     result["result"] = self.get_argument("user")
                     util.write(self, 1, "", result)
                     self.finish()
                 #self.write("ok")
                 return                           
             else:
                 util.write(self, 0, "用户名不存在或者用户名与密码不匹配", {})
                 self.finish()
         else:
             util.write(self, 0, "用户名不存在或者用户名与密码不匹配", {})
             self.finish()
     else:
         util.errorHandle(self, 0)
         self.finish()
Example #27
def _scan(target):
    global ports
    global results

    try:
        data = None
        host, port = target
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(1.0)
        sock.connect((str(host), int(port)))

        try:
            data = sock.recv(1024)
        except (socket.error, socket.timeout):
            pass

        sock.close()

        if data:
            printable = set(chr(n) for n in range(32, 123))
            data = ''.join(c for c in data if c in printable)
            data = data.splitlines()[0] if '\n' in data else str(data if len(str(data)) <= 80 else data[:77] + '...')
            item = {str(port) : {'protocol': ports[str(port)]['protocol'], 'service': data, 'state': 'open'}}
        else:
            item = {str(port) : {'protocol': ports[str(port)]['protocol'], 'service': ports[str(port)]['service'], 'state': 'open'}}

        results.get(host).update(item)

    except (socket.error, socket.timeout):
        pass
    except Exception as e:
        util.log("{} error: {}".format(_scan.__name__, str(e)))
Example #28
    def startProcess(self, movies, total):
        updated = 0
        resume = self.getResume()

        if HIDE_MOVIES:
            util.notification(util.l("Started_updating_movies_ratings"))
        else:
            progress = util.dialogProgress()

        for count, movie in enumerate(movies):
            if util.abortRequested() or (not HIDE_MOVIES and progress.iscanceled()):
                self.writeResume(count)
                break
            if count >= resume:
                if not HIDE_MOVIES:
                    progress.update((count * 100) // total, "%s %s" % (util.l("Searching_for"), movie["label"]))
                updated += self.updateMovie(movie)
        else:  # for-else: runs only when the loop completed without a break
            util.deleteF("resume_movies")
            util.writeDate("movies")

        text = "%s: %s %s %s %s!" % (util.l("Movies_ratings_summary"), updated, util.l("of"), total, util.l("were_updated"))
        util.log(text)

        if HIDE_MOVIES:
            util.notification(text)
        else:
            progress.close()
            util.dialogOk(util.l("Completed"), text)
Example #29
 def g(self):
     result = yield db.user_shuo.find_one({'user': self.current_user})
     util.log( "MyShuoHandler.get" + str(result))
     if result:
         util.write(self, 1, "", result)
     else:
         util.errorHandle(self, 1)
Example #30
 def new_file(self, session, filename, **kwargs):
     """
     See :meth:`FileStore.new_file`
     """
     self.delete_files(session, filename)
     log("FS Creating %s/%s"%(session, filename))
     return FileStoreSQLAlchemy.DBFileWriter(self, session, filename)
Example #31
#!/usr/bin/python
# -*- coding: utf-8 -*-
import _six as six
import re
import greentest
import unittest
import socket
from time import time
import gevent
import gevent.socket as gevent_socket
from util import log
from _six import xrange

resolver = gevent.get_hub().resolver
log('Resolver: %s', resolver)

if getattr(resolver, 'pool', None) is not None:
    resolver.pool.size = 1

RESOLVER_IS_ARES = 'ares' in gevent.get_hub().resolver_class.__module__

assert gevent_socket.gaierror is socket.gaierror
assert gevent_socket.error is socket.error

DEBUG = False


def _run(function, *args):
    try:
        result = function(*args)
        assert not isinstance(result, BaseException), repr(result)
Example #32
def insertIntoDatabase(session_path, tsv_data):
    """
    Converts all possible metadata to Eyra format (see e.g. Backend/db/schema_setup.sql)

    tsv_data with metadata 
        "eyra_id unique_id transcript speaker place_condition speaker_gender
         device device_serial_number device_IMEI device_android_id prologue_length 
         epilogue_length total_audio_samples audio_sample_size sample_coding cluster_id 
         sample_rate atomic_type atomic_size audio_sample_type"

    if a prompt in tsv_data is not in the database, it is added, with valid=0 and prompLabel='ex'

    returns newly created session_id

    """

    session_name = os.path.basename(session_path)
    log("Processing session: {}".format(session_name))

    placeholder = 'N/A'
    session_id = None
    start_time = str(uuid.uuid4())  # uuid just to bypass unique constraints

    for i, row in enumerate(tsv_data):
        if i == 0:
            # skip header
            continue
        if row[15] == session_name:
            filename = row[0]
            prompt = row[2]
            speaker = row[3]
            environment = row[4]
            gender = row[5]
            device = row[6]
            device_android_id = row[
                9]  # use the android id since that's what the app uses currently

            try:
                cur = _db.cursor()

                # device
                cur.execute(
                    'SELECT id FROM device WHERE userAgent=%s AND imei=%s',
                    (device, device_android_id))
                device_id = cur.fetchone()
                if not device_id:
                    # if we haven't already inserted device, do it now
                    cur.execute(
                        'INSERT INTO device (userAgent, imei) VALUES (%s, %s)',
                        (device, device_android_id))
                    device_id = cur.lastrowid
                else:
                    device_id = device_id[0]
                # speaker
                cur.execute(
                    'SELECT id FROM speaker WHERE name=%s AND deviceImei=%s',
                    (speaker, device_android_id))
                speaker_id = cur.fetchone()
                if not speaker_id:
                    cur.execute(
                        'INSERT INTO speaker (name, deviceImei) VALUES (%s, %s)',
                        (speaker, device_android_id))
                    speaker_id = cur.lastrowid
                    # speaker_info
                    cur.execute(
                        'INSERT INTO speaker_info (speakerId, s_key, s_value) VALUES (%s, %s, %s)',
                        (speaker_id, "gender", gender))
                else:
                    speaker_id = speaker_id[0]
                # token
                cur.execute('SELECT id FROM token WHERE inputToken=%s',
                            (prompt, ))
                token_id = cur.fetchone()
                if not token_id:
                    # token must not be in database, simply add it
                    cur.execute(
                        'INSERT INTO token (inputToken, valid, promptLabel) VALUES (%s, %s, %s)',
                        (prompt, str(0), 'ex'))
                    token_id = cur.lastrowid
                else:
                    token_id = token_id[0]
                # session
                cur.execute(
                    'SELECT id FROM session WHERE speakerId=%s AND instructorId=%s \
                             AND deviceId=%s AND location=%s AND start=%s AND end=%s and comments=%s',
                    (speaker_id, 1, device_id, 'custom location', start_time,
                     placeholder, environment))
                session_id = cur.fetchone()
                if not session_id:
                    cur.execute(
                        'INSERT INTO session (speakerId, instructorId, deviceId, location, start, end, comments) \
                                 VALUES (%s, %s, %s, %s, %s, %s, %s)',
                        (speaker_id, 1, device_id, 'custom location',
                         start_time, placeholder, environment))
                    session_id = cur.lastrowid
                else:
                    session_id = session_id[0]
                # recording
                cur.execute(
                    'INSERT INTO recording (tokenId, speakerId, sessionId, filename) \
                             VALUES (%s, %s, %s, %s)',
                    (token_id, speaker_id, session_id, filename))
            except MySQLdb.Error as e:
                msg = 'Error inserting info into database.'
                log(msg, e)
                raise

    if session_id is not None:
        return session_id
    else:
        raise ValueError(
            'Warning, no data found for current session; could be a session folder not in the .tsv. '
            'Aborting (remember that files are still copied to EYRA_ROOT even though the db is untouched).'
        )
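A hypothetical invocation, assuming the .tsv was read with the csv module (header row included, since the function skips row 0):

import csv

with open('metadata.tsv', newline='') as f:
    tsv_data = list(csv.reader(f, delimiter='\t'))
session_id = insertIntoDatabase('/data/sessions/session_0042', tsv_data)
log('created session {}'.format(session_id))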
Example #33
        os.path.join(base, 'fighter', name, 'motion', 'body', 'main.pac')
    }


if __name__ == '__main__':
    util.log_level = util.LOG_INFO
    paths = get_fighter_paths('S:\\SSB4\\extracted_content', 'yoshi')
    bones, bone_uids = fmt_vbn.read(paths['vbn'])
    archive = fmt_pac.read(paths['pac'])

    # convert OMOs into JSON animation format
    animations = []
    for f, f_data in archive.items():
        m = re.match(r'.+([A-Z])\d{2}(.+)\.([a-z]{3})', f)
        if m.group(3) == 'omo' and (m.group(1) == 'A'):
            util.log('\nReading OMO', f, level=util.LOG_INFO)
            motion = fmt_omo.read(f_data,
                                  debug_bones=bones,
                                  debug_bone_ids=bone_uids)
            animation = {
                'name': m.group(2),
                'fps': 30,
                'length': motion['frame_count'] / 30,
                'hierarchy': [{
                    'parent': b['parent'],
                    'keys': []
                } for b in bones]
            }
            if motion['unknown']:
                animation['name'] += '_INCOMPLETE'
Example #34
 def __init__(s, dists_node, safe_distance, origin=np.matrix([0, 0]).T):
     s.dists_node = dists_node
     s.safe_distance = safe_distance
     s.origin = origin
     s.log = ut.log("TooClose")
Example #35
    def predict(self, X_test, time_remain):
        self.Time_data_info['time_ramain_so_far'] = time_remain

        start_feature = time.time()

        Xs = self.tables
        main_table = Xs[MAIN_TABLE_NAME]

        #index = main_table.sort_values(by=self.config['time_col']).index
        #split = int(0.6*len(index))
        #train_index, test_index = index[:split], index[split:]

        #log(f"Merge train and test tables...")
        main_table = pd.concat([main_table, X_test], keys=['train', 'test'])
        main_table.index = main_table.index.map(lambda x: f"{x[0]}_{x[1]}")
        Xs[MAIN_TABLE_NAME] = main_table

        log(f"Feature engineering...")
        clean_tables(Xs)
        X = merge_table(Xs, self.config)
        X = clean_df(X)
        X = feature_engineer(X, self.config)

        X_train = X[X.index.str.startswith("train")]
        X_train.index = X_train.index.map(lambda x: int(x.split('_')[1]))
        X_train.sort_index(inplace=True)
        y_train = self.targets

        end_feature = time.time()

        self.Time_data_info['time_for_feature_engineering'] = (end_feature -
                                                               start_feature)

        self.Time_data_info['time_ramain_so_far'] = self.Time_data_info[
            'time_ramain_so_far'] - self.Time_data_info[
                'time_for_feature_engineering']

        #self.Time_data_info['data_cols_for_hp'] = X.shape[1]
        #self.Time_data_info['data_rows_for_hp'] = X.shape[0]
        print(f"TIME info:", self.Time_data_info)

        # train model
        log(f"Training...")
        train_start = time.time()
        #train(X_train.iloc[train_index], y_train.iloc[train_index], self.config)

        timetrain(X_train, y_train, self.config, self.Time_data_info)

        #train with time limitation
        #timetrain(X_train.iloc[train_index], y_train.iloc[train_index], self.config, self.Time_data_info)

        train_end = time.time()

        self.Time_data_info['time_ramain_so_far'] = self.Time_data_info[
            'time_ramain_so_far'] - (train_end - train_start)
        self.Time_data_info['time_for_model_train'] = (train_end - train_start)

        print("TIME info:", self.Time_data_info)

        #r = predict(X_train.iloc[test_index], self.config)
        #r = timepredict(X_train.iloc[test_index], self.config)

        #print('Test auc: ', roc_auc_score(y_train.iloc[test_index], r))

        #importance = self.config["model"].feature_importance(importance_type='split')
        #feature_name = np.array(self.config["model"].feature_name())
        #feature_importance = pd.DataFrame({'feature_importance': feature_name[np.argsort(-importance)], 'importnace':-np.sort(-importance)})
        #feature_importance.to_csv('feature_importance.csv', index=False)

        # predict
        log(f"Predicting...")
        X_test = X[X.index.str.startswith("test")]
        X_test.index = X_test.index.map(lambda x: int(x.split('_')[1]))
        X_test.sort_index(inplace=True)
        result = predict(X_test, self.config)

        return pd.Series(result)
Example #36
from course import get_today_schedule, get_tomorrow_schedule
from getTimeTable import getTimeTable
from login import getCookie
from config import config
from accouts import accouts
from mail import send_schedule
from util import log

logger = log()

for user in accouts:
    cookie = getCookie(user["username"], user["password"])
    if cookie is None:
        logger.error("user " + user["username"] +
                     "login fail, maybe the password is wrong")
    else:
        schedule = get_tomorrow_schedule(getTimeTable(cookie))
        send_schedule(user["email"], schedule)
Example #37
def log_call(result, time, function, *args):
    log(format_call(function, args))
    log_fresult(result, time)
Example #38
    with open(opts.cluster_spec_path, 'r') as f:
        cluster_spec = json.load(f)
    with open(opts.cfg_path, 'r') as f:
        cfg = json.load(f)
    assert (os.path.exists(util.gd_tfr_dir(gd_id=cfg['gd_id'])))

    if opts.job_name == "ps" and opts.task_index == 0:
        cfg['train_id'] = util.db_insert(table='train_runs',
                                         **cfg,
                                         git_commit=util.get_commit())
    else:
        time.sleep(5)
        cfg['train_id'] = util.db_query_one(
            'select max(train_id) as train_id from train_runs')['train_id']

    try:
        main(opts, cluster_spec=cluster_spec, cfg=cfg)
    except tf.errors.OpError as e:
        tb = traceback.format_exc()
        util.log(kind='error',
                 author='train',
                 msg="OP_ERROR\n%s\n%s" % (str(e), tb))
    except Exception as e:
        tb = traceback.format_exc()
        util.log(kind='error',
                 author='train',
                 msg="EXCEPTION\n%s\n%s" % (str(e), tb))
    except:
        tb = traceback.format_exc()
        util.log(kind='error', author='train', msg="UNKNOWN\n%s" % (tb))
Example #39
    def processBatch(self, name, session_id, indices) -> bool:
        """
        The main processing function of this module. This function 
        is called to do processing on a batch of recordings from the session.

        Parameters:

            name        the name to use to write the report to redis datastore
                        at 'report/name/session_id'
            session_id  id of session
            indices     indices in the list of recordings in the redis 
                        datastore ('session/session_id/recordings') to process
                        in this batch. 
                        indices=[] indicates no processing should
                        be done at this stage (return True, no new recordings to process)

        Return:
            False or raise an exception if something is wrong (and
            this should not be called again.)
        """

        if not indices:
            return True

        beam = 15.0
        max_active = 750
        lattice_beam = 8.0
        acoustic_scale = 0.1

        oldDir = os.getcwd()
        try:
            # change to directory of this file (for using relative paths)
            os.chdir(os.path.dirname(os.path.realpath(__file__)))

            # set up commands for sh
            compute_mfcc_feats = sh.Command('{}/src/featbin/compute-mfcc-feats'
                                            .format(self.common.kaldi_root))
            gmm_latgen_faster = sh.Command('{}/src/gmmbin/gmm-latgen-faster'
                                            .format(self.common.kaldi_root))
            lattice_oracle = sh.Command('{}/src/latbin/lattice-oracle'
                                        .format(self.common.kaldi_root))

            # grab the recordings list for this session
            recordings = json.loads(self.redis.get('session/{}/recordings'.format(session_id)).decode('utf8'))
            # only use recordings[indices] as per our batch
            recordings = [recordings[i] for i in indices]

            # make a new temp .scp file which will signify a .scp file only for our tokens
            #   in these recordings
            _, tokens_graphs_scp_path = tempfile.mkstemp(prefix='qc')
            # other tempfiles
            _, mfcc_feats_scp_path = tempfile.mkstemp(prefix='qc')
            _, mfcc_feats_path = tempfile.mkstemp(prefix='qc')
            _, tokens_path = tempfile.mkstemp(prefix='qc')
            _, edits_path = tempfile.mkstemp(prefix='qc')
            with open(tokens_path, 'wt') as tokens_f, \
                    open(mfcc_feats_scp_path, 'w') as mfcc_feats_tmp, \
                    open(tokens_graphs_scp_path, 'w') as tokens_graphs_scp:

                graphs_scp = [] # will contain list of lines in scp script referencing relevant decoded graphs
                for r in recordings:
                    if self.common.downsample:
                        print('{rec_id} sox {rec_path} -r{sample_freq} -t wav - |'
                                .format(rec_id=r['recId'],
                                        rec_path=r['recPath'],
                                        sample_freq=self.common.sample_freq),
                              file=mfcc_feats_tmp)
                    else:
                        print('{} {}'.format(r['recId'], r['recPath']),
                              file=mfcc_feats_tmp)

                    token_ids = ' '.join(self.common.sym_id_map.get(tok, self.common.oov_id) for
                                         tok in r['token'].split())
                    print('{} {}'.format(r['recId'], token_ids),
                          file=tokens_f)

                    # create file with 'recId arkref'
                    graphs_scp.append('{} {}'.format(r['recId'],
                                        ' '.join(self.decodedScpLines[str(r['tokenId'])].split(' ')[1:])))

                # make sure .scp file is sorted on keys
                graphs_scp = sorted(graphs_scp, key=lambda x: x.split()[0])
                for line in graphs_scp:
                    print(line, file=tokens_graphs_scp)

            try:
                # MFCCs are needed at 2 stages of the pipeline so we have to dump on disk
                compute_mfcc_feats(
                    '--sample-frequency={}'.format(self.common.sample_freq),
                    '--use-energy=false',
                    'scp,p:{}'.format(mfcc_feats_scp_path),
                    'ark:{}'.format(mfcc_feats_path)
                )

                compute_cmvn_cmd = ('{kaldi_root}/src/featbin/compute-cmvn-stats ' +
                                    '"ark,p:{mfcc_feats_path}" ' +
                                    '"ark:-" ').format(mfcc_feats_path=mfcc_feats_path,
                                                       kaldi_root=self.common.kaldi_root)
                feats_cmd = ('{kaldi_root}/src/featbin/apply-cmvn ' +
                             '"ark,p:{compute_cmvn_cmd} |" ' +
                             '"ark:{mfcc_feats_path}" ' +
                             '"ark:| {kaldi_root}/src/featbin/add-deltas ark:- ark:-" '
                            ).format(compute_cmvn_cmd=compute_cmvn_cmd,
                                     mfcc_feats_path=mfcc_feats_path,
                                     kaldi_root=self.common.kaldi_root)

                # create a pipe using sh, output of gmm_latgen_faster piped into lattice_oracle
                # piping in contents of tokens_graphs_scp_path and writing to edits_path
                # note: be careful, as of date sh seems to swallow exceptions in the inner pipe
                #   https://github.com/amoffat/sh/issues/309
                lattice_oracle( 
                    gmm_latgen_faster(
                        sh.cat(
                            tokens_graphs_scp_path,
                            _piped=True,
                            _err=errLog
                        ),
                        '--acoustic-scale={}'.format(acoustic_scale),
                        '--beam={}'.format(beam),
                        '--max-active={}'.format(max_active),
                        '--lattice-beam={}'.format(lattice_beam),
                        '--word-symbol-table={}'.format(self.common.sym_id_path),
                        '{}'.format(self.common.acoustic_model_path),
                        'scp,p:-',
                        'ark,p:{} |'.format(feats_cmd),
                        'ark:-',
                        _piped=True,
                        _err=errLog
                    ),
                    'ark,p:-', 
                    'ark,p:{ref_tokens}'.format(ref_tokens=tokens_path), 
                    'ark,p:/dev/null', 
                    'ark,t:-',
                    _out=edits_path
                )

                with open(edits_path, 'rt') as edits_f:
                    edits = dict((int(rec_id), int(n_edits)) for rec_id, n_edits
                                 in (line.strip().split() for line in edits_f))
            except sh.ErrorReturnCode_1 as e:
                # No data (e.g. all wavs unreadable)
                edits = {}
                log('e.stderr: ', e.stderr)
        finally:
            os.chdir(oldDir)

        # We should return something like this
        # {"sessionId": ...,
        # "requestId": ...,
        # "totalStats": {"accuracy": [0.0;1.0]"},
        # "perRecordingStats": [{"recordingId": ...,
        #                        "stats": {"accuracy": [0.0;1.0]},
        #                         }]}
        qc_report = {"sessionId": session_id,
                     "requestId": str(uuid.uuid4()), # just use a uuid
                     "totalStats": {"accuracy": 0.0},
                     "perRecordingStats": []}

        cum_accuracy = 0.0
        for r in recordings:
            error = ''
            try:
                wer = edits[r['recId']] / len(r['token'].split())
            except KeyError as e:
                # Kaldi must have choked on this recording for some reason
                if isWavHeaderOnly(r['recPath']):
                    error = 'wav_header_only'
                    log('Error, only wav header in recording: {} for session: {}'
                        .format(r['recId'], session_id))
                else:
                    # unknown error
                    error = 'unknown_error'
                    log('Error, unknown error processing recording: {} for session {}'
                        .format(r['recId'], session_id))

            if not error:
                error = 'no_error'
                accuracy = max(0.0, 1 - wer)
            else:
                accuracy = 0.0

            cum_accuracy += accuracy

            prec = qc_report['perRecordingStats']
            stats = {"accuracy": accuracy, "error": error}
            prec.append({"recordingId": r['recId'], "stats": stats})

        try:
            avg_accuracy = cum_accuracy / len(qc_report['perRecordingStats'])
        except ZeroDivisionError:
            avg_accuracy = 0.0
        else:
            qc_report['totalStats']['accuracy'] = avg_accuracy

        # TODO: Do this more efficiently. Need to change how we store reports.
        str_report = self.redis.get('report/{}/{}'.format(name, session_id))
        if str_report:
            old_report = json.loads(str_report.decode('utf-8'))
            newAvgAccuracy = (old_report['totalStats']['accuracy'] + qc_report['totalStats']['accuracy']) / 2
            qc_report = update(old_report, qc_report)
            qc_report['totalStats']['accuracy'] = newAvgAccuracy

        self.redis.set('report/{}/{}'.format(name, session_id), 
                       json.dumps(qc_report))

        return True
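A sketch of reading back the report this method stores; the key layout comes from the code above, while the redis connection settings, module name and session id are assumptions:

import json
import redis

r = redis.StrictRedis(host='localhost', port=6379)
name, session_id = 'qc_module', 42   # hypothetical report name and session id
raw = r.get('report/{}/{}'.format(name, session_id))
report = json.loads(raw.decode('utf-8')) if raw else None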
Example #40
def main(opts, cluster_spec, cfg):
    util.log(author='%s:%d' % (opts.job_name, opts.task_index),
             msg='starting @ %s' % util.get_hostname(expensive=True))
    cluster = tf.train.ClusterSpec(cluster_spec)
    server = tf.train.Server(cluster,
                             job_name=opts.job_name,
                             task_index=opts.task_index)

    if opts.job_name == "ps":
        util.log(author='%s:%d' % (opts.job_name, opts.task_index),
                 msg='joining server')
        server.join()
        raise Exception("Expecting server.join() to block forever")

    assert (opts.job_name == "worker")
    is_chief = (opts.task_index == 0)

    outqueue = Queue()
    train_post_thread = TrainPostThread(cfg, outqueue)
    train_post_thread.start()

    with tf.device("/job:ps/task:0"):
        params = NeuroSATParams(cfg=cfg)

    with tf.device(
            tf.train.replica_device_setter(
                worker_device="/job:worker/task:%d" % opts.task_index,
                cluster=cluster)):
        filenames = [
            os.path.join(util.gd_tfr_dir(gd_id=cfg['gd_id']), x)
            for x in os.listdir(util.gd_tfr_dir(gd_id=cfg['gd_id']))
        ]
        dataset = tf.data.TFRecordDataset(
            filenames=filenames,
            compression_type="GZIP",
            num_parallel_reads=cfg['n_parallel_reads'])
        # TODO(dselsam): don't hardcode the number of shards
        # idea: extend cluster_spec to map (job, task) -> (n_shards, shard_idx)
        dataset = dataset.shard(num_shards=4, index=opts.task_index % 4)
        dataset = dataset.map(example_to_tftd,
                              num_parallel_calls=cfg['n_parallel_calls'])
        dataset = dataset.filter(
            lambda tftd: 2 * tftd.n_vars + tftd.n_clauses < cfg['max_n_nodes'])
        dataset = dataset.repeat()
        dataset = dataset.prefetch(cfg['n_prefetch'])

        tftd = dataset.make_one_shot_iterator().get_next()

        args = NeuroSATArgs(n_vars=tftd.n_vars,
                            n_clauses=tftd.n_clauses,
                            CL_idxs=tftd.CL_idxs)
        guesses = apply_neurosat(cfg=cfg, params=params, args=args)

        pi_v_targets = tf.cast(tftd.core_var_mask, tf.float32)
        pi_v_targets = pi_v_targets / tf.reduce_sum(pi_v_targets)

        pi_c_targets = tf.cast(tftd.core_clause_mask, tf.float32)
        pi_c_targets = pi_c_targets / tf.reduce_sum(pi_c_targets)

        cv_loss = cfg['cv_loss_scale'] * tfutil.kldiv(
            logits=guesses.pi_core_var_logits, labels=pi_v_targets)
        cc_loss = cfg['cc_loss_scale'] * tfutil.kldiv(
            logits=guesses.pi_core_clause_logits, labels=pi_c_targets)
        l2_loss = cfg['l2_loss_scale'] * tfutil.build_l2_loss()
        loss = cv_loss + cc_loss + l2_loss

        stats = Stats(dp_id=tftd.dp_id,
                      cv_loss=cv_loss,
                      cc_loss=cc_loss,
                      l2_loss=l2_loss)

        global_step = tf.train.get_or_create_global_step()
        learning_rate = tfutil.build_learning_rate(cfg, global_step)

        apply_grads = tf.cond(
            tftd.is_train, lambda: tfutil.build_apply_gradients(
                cfg, loss, learning_rate, global_step), lambda: True)

    util.log(author='%s:%d' % (opts.job_name, opts.task_index),
             msg='creating session (train_id=%d)...' % cfg['train_id'])
    with tf.train.MonitoredTrainingSession(
            master=server.target,
            is_chief=is_chief,
            checkpoint_dir=util.checkpoint_dir(
                train_id=cfg['train_id'])) as mon_sess:
        util.log(author='%s:%d' % (opts.job_name, opts.task_index),
                 msg='starting session loop')
        step = 0
        while True:
            try:
                (_, stats_v), n_secs = util.timeit(mon_sess.run,
                                                   [apply_grads, stats])
                outqueue.put(
                    tuple(map(util.de_numpify, stats_v)) +
                    (cfg['train_id'], n_secs))
                step += 1
            except tf.errors.ResourceExhaustedError as e:
                tb = traceback.format_exc()
                util.log(kind='error',
                         author='train',
                         msg="RESOURCE_EXHAUSTED\n%s\n%s" % (str(e), tb))
                util.db_insert(table='tune_ooms', train_id=cfg['train_id'])
            except tf.errors.OpError as e:
                tb = traceback.format_exc()
                util.log(kind='error',
                         author='train',
                         msg="OP_ERROR\n%s\n%s" % (str(e), tb))
            except Exception as e:
                tb = traceback.format_exc()
                util.log(kind='error',
                         author='train',
                         msg="EXCEPTION\n%s\n%s" % (str(e), tb))
            except:
                tb = traceback.format_exc()
                util.log(kind='error',
                         author='train',
                         msg="UNKNOWN\n%s" % (tb))
Example #41
                if t < tend:
                    spline_idx = j
                    break
            a, b, c, d = qkeys[i][spline_idx]
            tend = qtimes[i][spline_idx]
            if spline_idx == 0:
                tbeg = 0.0
            else:
                tbeg = qtimes[i][spline_idx - 1]
            t_perc = (t - tbeg) / (tend - tbeg)
            v = a * (t_perc**3) + b * (t_perc**2) + c * t_perc + d
            all_qvalues[i].append(v)

    for i in xrange(len(all_qtimes)):
        x, y, z, w = all_qvalues[0][i], all_qvalues[1][i], all_qvalues[2][
            i], all_qvalues[3][i]
        sqrsum = x**2 + y**2 + z**2 + w**2
        # log("t = %f, q: (%f, %f, %f, %f), sqrsum=%f" % (all_qtimes[i], x, y, z, w, sqrsum), lv=0)
        assert math.fabs(sqrsum - 1.0) < 1e-2, str(math.fabs(sqrsum - 1.0))


if __name__ == '__main__':
    f = open(sys.argv[1], "rb")
    data = f.read()
    f.close()

    set_log_level(0)
    bone_names = game_util.parse_bone_names_using_g1a_path(sys.argv[1])
    log("Total %d bone names parsed" % len(bone_names), lv=0)

    parse(data, bone_names)
Example #42
def main():
  args = sys.argv[1:]
  upload               = test_for_flag(args, "-upload")
  upload_tmp           = test_for_flag(args, "-uploadtmp")
  testing              = test_for_flag(args, "-test") or test_for_flag(args, "-testing")
  build_test_installer = test_for_flag(args, "-test-installer") or test_for_flag(args, "-testinst") or test_for_flag(args, "-testinstaller")
  build_rel_installer  = test_for_flag(args, "-testrelinst")
  build_prerelease     = test_for_flag(args, "-prerelease")
  skip_transl_update   = test_for_flag(args, "-noapptrans")
  svn_revision         = test_for_flag(args, "-svn-revision", True)
  target_platform      = test_for_flag(args, "-platform", True)

  if len(args) != 0:
    usage()
  verify_started_in_right_directory()

  if build_prerelease:
    if svn_revision is None:
      run_cmd_throw("svn", "update")
      (out, err) = run_cmd_throw("svn", "info")
      ver = str(parse_svninfo_out(out))
    else:
      # allow to pass in an SVN revision, in case SVN itself isn't available
      ver = svn_revision
  else:
    ver = extract_sumatra_version(os.path.join("src", "Version.h"))
  log("Version: '%s'" % ver)

  # don't update translations for release versions to prevent Trunk changes
  # from messing up the compilation of a point release on a branch
  if build_prerelease and not skip_transl_update:
    trans_upload.uploadStringsIfChanged()
    changed = trans_download.downloadAndUpdateTranslationsIfChanged()
    # Note: this is not a perfect check since re-running the script will
    # proceed
    if changed:
      print("\nNew translations have been downloaded from apptranslator.og")
      print("Please verify and checkin src/Translations_txt.cpp and strings/translations.txt")
      sys.exit(1)

  filename_base = "SumatraPDF-%s" % ver
  if build_prerelease:
    filename_base = "SumatraPDF-prerelease-%s" % ver

  s3_dir = "sumatrapdf/rel"
  if build_prerelease:
    s3_dir = "sumatrapdf/prerel"
  if upload_tmp:
    upload = True
    s3_dir += "tmp"

  if upload:
    log("Will upload to s3 at %s" % s3_dir)
    conf = load_config()
    s3.set_secrets(conf.aws_access, conf.aws_secret)
    s3.set_bucket("kjkpub")

  s3_prefix = "%s/%s" % (s3_dir, filename_base)
  s3_exe           = s3_prefix + ".exe"
  s3_installer     = s3_prefix + "-install.exe"
  s3_pdb_zip       = s3_prefix + ".pdb.zip"
  s3_exe_zip       = s3_prefix + ".zip"

  s3_files = [s3_exe, s3_installer, s3_pdb_zip]
  if not build_prerelease:
    s3_files.append(s3_exe_zip)

  cert_pwd = None
  cert_path = os.path.join("scripts", "cert.pfx")
  if upload:
    map(s3.verify_doesnt_exist, s3_files)
    verify_path_exists(cert_path)
    conf = load_config()
    cert_pwd = conf.GetCertPwdMustExist()

  obj_dir = "obj-rel"
  if target_platform == "X64":
    obj_dir += "64"

  if not testing and not build_test_installer and not build_rel_installer:
    shutil.rmtree(obj_dir, ignore_errors=True)
    shutil.rmtree(os.path.join("mupdf", "generated"), ignore_errors=True)

  config = "CFG=rel"
  if build_test_installer and not build_prerelease:
    obj_dir = "obj-dbg"
    config = "CFG=dbg"
  extcflags = ""
  if build_prerelease:
    extcflags = "EXTCFLAGS=-DSVN_PRE_RELEASE_VER=%s" % ver
  platform = "PLATFORM=%s" % (target_platform or "X86")

  (out, err) = run_cmd_throw("nmake", "-f", "makefile.msvc", config, extcflags, platform, "all_sumatrapdf")
  if build_test_installer: print_run_resp(out, err)

  exe = os.path.join(obj_dir, "SumatraPDF.exe")
  if upload:
    sign(exe, cert_pwd)
    sign(os.path.join(obj_dir, "uninstall.exe"), cert_pwd)

  build_installer_data(obj_dir)
  (out, err) = run_cmd_throw("nmake", "-f", "makefile.msvc", "Installer", config, platform, extcflags)
  if build_test_installer: print_run_resp(out, err)

  if build_test_installer or build_rel_installer:
    sys.exit(0)

  installer = os.path.join(obj_dir, "Installer.exe")
  if upload:
    sign(installer, cert_pwd)

  pdb_archive = create_pdb_archive(obj_dir, "%s.pdb.lzma" % filename_base)

  builds_dir = os.path.join("builds", ver)
  if os.path.exists(builds_dir):
    shutil.rmtree(builds_dir)
  os.makedirs(builds_dir)

  copy_to_dst_dir(exe, builds_dir)
  copy_to_dst_dir(installer, builds_dir)
  copy_to_dst_dir(pdb_archive, builds_dir)

  # package portable version in a .zip file
  if not build_prerelease:
    exe_zip_name = "%s.zip" % filename_base
    zip_one_file(obj_dir, "SumatraPDF.exe", exe_zip_name)
    exe_zip_path = os.path.join(obj_dir, exe_zip_name)
    copy_to_dst_dir(exe_zip_path, builds_dir)

  if not upload: return

  if build_prerelease:
    jstxt  = 'var sumLatestVer = %s;\n' % ver
    jstxt += 'var sumBuiltOn = "%s";\n' % time.strftime("%Y-%m-%d")
    jstxt += 'var sumLatestName = "%s";\n' % s3_exe.split("/")[-1]
    jstxt += 'var sumLatestExe = "http://kjkpub.s3.amazonaws.com/%s";\n' % s3_exe
    jstxt += 'var sumLatestPdb = "http://kjkpub.s3.amazonaws.com/%s";\n' % s3_pdb_zip
    jstxt += 'var sumLatestInstaller = "http://kjkpub.s3.amazonaws.com/%s";\n' % s3_installer

  s3.upload_file_public(installer, s3_installer)
  s3.upload_file_public(pdb_archive, s3_pdb_zip)
  s3.upload_file_public(exe, s3_exe)

  if build_prerelease:
    s3.upload_data_public(jstxt, "sumatrapdf/sumatralatest.js")
    txt = "%s\n" % ver
    s3.upload_data_public(txt, "sumatrapdf/sumpdf-prerelease-latest.txt")
    delete_old_pre_release_builds()
  else:
    s3.upload_file_public(exe_zip_path, s3_exe_zip)
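The script above leans on a test_for_flag helper that the excerpt does not show. A minimal sketch of the behavior the call sites imply, assuming a plain flag is removed from args and returns True/False, while has_data=True consumes and returns the token that follows; the real helper may differ:

def test_for_flag(args, flag, has_data=False):
    # Sketch only: mutate args in place so leftover entries can be detected.
    if flag not in args:
        return None if has_data else False
    idx = args.index(flag)
    args.remove(flag)
    if not has_data:
        return True
    if idx >= len(args):  # flag was the last token, no value follows
        return None
    return args.pop(idx)

This also explains the later "if len(args) != 0: usage()" check: every recognized flag has been consumed, so anything left over is an unknown argument.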
Example #43
0
            return

        if self.path == '/pulse':
            brick.heartbeat()
            self.send_response(200)
            self.end_headers()
            return

        # CGIHTTPRequestHandler.do_GET(self)

    # Handle POST requests
    def do_POST(self):
        content_len = int(self.headers.get('content-length', 0))
        post_body = self.rfile.read(content_len)
        self.send_response(200)
        self.end_headers()

        # Decode the request body and parse it as JSON
        body = post_body.decode("utf-8", "replace")
        data = json.loads(body)
        brick.handler(data)
        return


httpd = Server(("", PORT), Handler)
try:
    log("Start serving at port %i" % PORT)
    httpd.serve_forever()
except KeyboardInterrupt:
    pass
httpd.server_close()
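The excerpt assumes scaffolding defined above it: PORT, a Server class, the Handler whose do_POST is shown, a brick object, and a log helper. A plausible minimal stand-in for everything except brick and Handler (all of these definitions are assumptions, not the original code):

import json
try:
    from http.server import HTTPServer  # Python 3
except ImportError:
    from BaseHTTPServer import HTTPServer  # Python 2

PORT = 8080          # assumed; the real value is defined above the excerpt
Server = HTTPServer  # the real Server may add threading or CGI support

def log(msg):
    print(msg)       # stand-in for the logger used above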
Example #44
0
def parse(data, bone_names=()):
    get = get_getter(data, ">")
    fourcc = get(0x0, "8s")
    assert fourcc == "G1A_0042", "invalid fourcc"
    file_size = get(0x8, "I")
    assert len(data) == file_size * 0x10, "file size does not match!"
    unk0 = get(0xc, "H")
    assert unk0 in (0x1c, 0x410, 0x14), "unknown header size"
    # 0x14 is used in Field model
    unk1, unk2 = get(0xe, "2B")
    assert 0 <= unk1 <= 2
    assert unk2 == 1
    unk_f = get(0x10, "f")
    # count(locals(), "unk_f")

    # unk3 * 0x10 points to the offset of a specific data block below,
    # i.e. the keyframe block
    unk3 = get(0x14, "I")

    # 0x18 ~ 0x30
    # may be reserved fields
    unk0x18 = get(0x18, "I")
    assert unk0x18 == 0
    unk0x1c = get(0x1c, "I")
    assert unk0x1c == 0
    unk0x20 = get(0x20, "I")
    assert unk0x20 == 1
    unk0x24 = get(0x24, "I")
    assert unk0x24 == 0
    unk0x28 = get(0x28, "I")
    assert unk0x28 == 0
    unk0x2c = get(0x2c, "I")
    assert unk0x2c == 0

    # 0x30 ~
    unk0x30, unk0x32 = get(0x30, "2H")
    # unk0x30: mapping count
    # unk0x32: max index
    assert unk0x30 == unk0x32 + 1 or unk0x30 == unk0x32
    values1 = []
    off = 0x34
    bone_indices = []
    block_info_offsets = []
    for i in xrange(unk0x30):
        values1.append(get(off, "2I"))  # (bone_index, )
        off += 0x8
        # log("%d, %d" % values1[-1], lv=0)
        block_info_offsets.append(values1[-1][1] * 0x10 + 0x30)
        # Example: MOB02A
        # bone count = 77
        # unk0x30 = 76, unk0x32 = 76
        # bone 50: Bip001_R_Calf, this bone maps to nothing
        assert values1[-1][0] <= unk0x32  # passed

    values2 = []
    # padding to 0x10 here
    rem = off % 0x10
    if rem != 0:
        off += 0x10 - rem

    block_info = []
    for i in xrange(unk0x30):
        assert off == block_info_offsets[i], "0x%x vs 0x%x" % (
            off, block_info_offsets[i])
        if unk0 == 0x410:  # morph animation
            size, _off = get(off + 0x4, "2I")
            block_info.append((off + _off * 0x10, size))
            off += 0x10
        else:  # bone animation
            # print "bone %d:" % i
            cnt = get(off, "I")
            assert cnt == 6  # count field; 10 tracks follow: sx, sy, sz, rx, ry, rz, rw, x, y, z
            for j in xrange(10):
                size, _off = get(off + 0x4 + j * 0x8, "2I")  # size ?
                block_info.append((off + _off * 0x10, size))
            # some reserved fields
            assert not any(get(off + 0x4 + 10 * 0x8, "3I")), get(
                off + 0x4 + 10 * 0x8, "3I")
            off += 0x60

    # ignore other types for now
    if unk0 != 0x1c:
        return

    track_names = ("scaleX", "scaleY", "scaleZ", "QuatX", "QuatY", "QuatZ",
                   "QuatW", "PosX", "PosY", "PosZ")
    # offset@unk3 * 0x10
    assert off == unk3 * 0x10
    # chunk size may vary
    inv_file = False
    assert off == block_info[0][0], "0x%x vs 0x%x" % (off, block_info[0][0])

    for j, (blk_off, blk_size) in enumerate(block_info):
        assert blk_off == off

        bone_index = j // 10

        if 3 <= j % 10 < 7:
            qidx = j % 10 - 3
        else:
            qidx = -1

        try:
            bone_name = bone_names[bone_index]
        except IndexError:
            bone_name = "----"

        if j % 10 == 0:
            log("\n\nBone: %s" % bone_name, lv=0)
            qkeys = ([], [], [], [])  # x, y, z, w
            qtimes = []

        if j % 10 == 9:
            check_quaternion(qkeys, qtimes)

        log("========== %5s ========== @offset=0x%x" %
            (track_names[j % 10], blk_off),
            lv=0)
        times = get(off + blk_size * 0x10, "%df" % blk_size, force_tuple=True)
        if qidx >= 0:
            qtimes.append(times)
        # a keyframe block always ends with that unknown float
        assert times[-1] == unk_f
        # these are probably keyframe times, so they're in ascending order
        for i in xrange(blk_size - 1):
            assert times[i] <= times[i + 1]
        times_ = (0.0, ) + times
        last_sum = None
        for i in xrange(blk_size):
            k = get(off + i * 0x10, "4f")
            now_sum = sum(k)
            if i == 0:
                eps_txt = ""
            else:
                eps = math.fabs(k[-1] - last_sum)
                eps_txt = "%f" % eps
                # if eps >= 1e-3:
                # 	count(locals(), "eps", fmt="%f")
            log("t=%f\t" % times_[i], k, "eps:", eps_txt, lv=0)
            if qidx >= 0:
                qkeys[qidx].append(k)
            last_sum = now_sum
        log("t=%f\t" % times[-1], lv=0)

        _real_size = blk_size * 0x14
        if _real_size % 0x10 != 0:
            _real_size += 0x10 - (_real_size % 0x10)

        off += _real_size
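The parser is built on a get_getter helper that the excerpt omits. A minimal sketch consistent with the call sites above: struct format strings, an endianness prefix (">" for big-endian), and single values unwrapped unless force_tuple is set.

import struct

def get_getter(data, endianness):
    # Sketch only: return a closure that unpacks fmt at the given offset.
    def get(offset, fmt, force_tuple=False):
        values = struct.unpack_from(endianness + fmt, data, offset)
        if len(values) == 1 and not force_tuple:
            return values[0]
        return values
    return get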
Example #45
0
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
            pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability
            correct += pred.eq(target.data.view_as(pred)).sum().item()

        test_loss /= len(test_loader.dataset)
        accuracy = 100. * correct / len(test_loader.dataset)
        print(f'Test set: Average loss: {test_loss:.4f}, Accuracy: {correct}/{len(test_loader.dataset)} ({accuracy:.2f}%)')
    return accuracy
'''
use_cuda = not args.no_cuda and torch.cuda.is_available()
#device = torch.device("cuda" if use_cuda else 'cpu')

#model = model.to(device)

#kwargs = {'num_workers': 5, 'pin_memory': True} if use_cuda else {}
'''test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('data', train=False, transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=2048, shuffle=False, **kwargs)
'''
accuracy = util.test(model, use_cuda)
util.log(args.log, f"accuracy_after_huffman  {accuracy}")
#print(" accuracy after huffman ", accuracy)
Example #46
0
 def get_master_state(self):
     url = "http://%s/state.json" % self.leader_pid
     log("Getting master state from: %s" % url)
     return try_get_json(url)
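try_get_json is not shown; the name suggests a fetch-and-decode helper that swallows failures. A sketch under that assumption:

import json
try:
    from urllib.request import urlopen  # Python 3
except ImportError:
    from urllib2 import urlopen         # Python 2

def try_get_json(url):
    # Sketch only: fetch url and decode JSON, returning None on failure
    # instead of raising.
    try:
        return json.loads(urlopen(url, timeout=5).read().decode('utf-8'))
    except Exception:
        return None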
Example #47
0
    def process(self):
        if self.i > 50000:
            self.i = 0
        self.i += 1

        # Pipelined multikey - Request
        rqst = self.api.create_request()
        if rqst == None:
            self.consistency = False
            return False

        self.api.append_command(rqst, self.pipelined_multikey_cmd)

        # Pipelined multikey - Check reply
        try:
            ret = self.api.do_request(rqst, self.timeout)
            if ret != 0:
                self.consistency = False
                return False

            while True:
                be_errno, reply = self.api.get_reply(rqst)
                if be_errno < 0 or reply == None:
                    if be_errno < 0:
                        self.consistency = False
                        return False
                    break

                if is_reply_ok(reply) == False:
                    self.consistency = False
                    return False

        except:
            if self.verbose:
                util.log('Connection closed in LoadGenerator:%s' %
                         self.pipelined_multikey_cmd)
            self.consistency = False
            return False

        # Multi - MSET
        rqst = self.api.create_request()
        if rqst == None:
            self.consistency = False
            return False

        cmd = 'mset 1%s 1 2%s 2 3%s 3 4%s 4 5%s 5 6%s 6' % (
            self.key, self.key, self.key, self.key, self.key, self.key)
        self.api.append_command(rqst, cmd)

        try:
            ret = self.api.do_request(rqst, self.timeout)
            if ret != 0:
                self.consistency = False
                return False

            while True:
                be_errno, reply = self.api.get_reply(rqst)
                if be_errno < 0 or reply == None:
                    if be_errno < 0:
                        self.consistency = False
                        return False
                    break

                if is_reply_ok(reply) == False:
                    self.consistency = False
                    return False

        except:
            if self.verbose:
                util.log('Connection closed in LoadGenerator:%s' % cmd)
            self.consistency = False
            return False

        # Multi - MGET
        rqst = self.api.create_request()
        if rqst == None:
            self.consistency = False
            return False

        cmd = 'mget 1%s 2%s 3%s 4%s 5%s 6%s' % (self.key, self.key, self.key,
                                                self.key, self.key, self.key)
        self.api.append_command(rqst, cmd)

        try:
            ret = self.api.do_request(rqst, self.timeout)
            if ret != 0:
                self.consistency = False
                return False

            while True:
                be_errno, reply = self.api.get_reply(rqst)
                if be_errno < 0 or reply == None:
                    if be_errno < 0:
                        self.consistency = False
                        return False
                    break

                if is_reply_ok(reply) == False:
                    self.consistency = False
                    return False

        except:
            if self.verbose:
                util.log('Connection closed in LoadGenerator:%s' % cmd)
            self.consistency = False
            return False

        # Multi - DEL
        rqst = self.api.create_request()
        if rqst == None:
            self.consistency = False
            return False

        cmd = 'del 1%s 2%s 3%s 4%s 5%s 6%s' % (self.key, self.key, self.key,
                                               self.key, self.key, self.key)
        self.api.append_command(rqst, cmd)

        try:
            ret = self.api.do_request(rqst, self.timeout)
            if ret != 0:
                self.consistency = False
                return False

            while True:
                be_errno, reply = self.api.get_reply(rqst)
                if be_errno < 0 or reply == None:
                    if be_errno < 0:
                        self.consistency = False
                        return False
                    break

                if is_reply_ok(reply) == False:
                    self.consistency = False
                    return False

        except:
            if self.verbose:
                util.log('Connection closed in LoadGenerator:%s' % cmd)
            self.consistency = False
            return False

        # CRC
        rqst = self.api.create_request()
        if rqst == None:
            self.consistency = False
            return False

        cmd = 'crc16 %s %d' % (self.key, self.i)
        self.api.append_command(rqst, cmd)

        try:
            ret = self.api.do_request(rqst, self.timeout)
            if ret != 0:
                self.consistency = False
                return False

            be_errno, reply = self.api.get_reply(rqst)
            if be_errno < 0 or reply == None:
                if be_errno < 0:
                    self.consistency = False
                    return False

            if reply[0] != ARC_REPLY_INTEGER:
                self.consistency = False
                return False

            # CRC - Check consistency
            self.value = crc16.crc16_buff(str(self.i), self.value)
            try:
                if (reply[1] != self.value):
                    if self.verbose:
                        util.log(
                            'Value Error in LoadGenerator, cmd:"%s", reply:%s, value:%d'
                            % (cmd, reply[1], self.value))
                    self.consistency = False
                    return False

            except ValueError:
                if self.verbose:
                    util.log('Value Error in LoadGenerator, reply:%s' %
                             str(reply))
                self.consistency = False
                return False

        except:
            if self.verbose:
                util.log('Connection closed in LoadGenerator:%s, except' % cmd)
                util.log(sys.exc_info())
                exc_type, exc_value, exc_traceback = sys.exc_info()
                traceback.print_exception(exc_type,
                                          exc_value,
                                          exc_traceback,
                                          limit=3,
                                          file=sys.stdout)
            self.consistency = False
            return False

        return True
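The four request/reply sections above differ only in the command they send. A sketch of a helper method, against the same assumed self.api interface, that would collapse the duplication:

    def _run_command(self, cmd):
        # Sketch only: send one command and drain its replies, flagging
        # self.consistency on any failure, exactly as each block above does.
        rqst = self.api.create_request()
        if rqst is None:
            self.consistency = False
            return False
        self.api.append_command(rqst, cmd)
        try:
            if self.api.do_request(rqst, self.timeout) != 0:
                self.consistency = False
                return False
            while True:
                be_errno, reply = self.api.get_reply(rqst)
                if be_errno < 0:
                    self.consistency = False
                    return False
                if reply is None:
                    break
                if not is_reply_ok(reply):
                    self.consistency = False
                    return False
        except:
            if self.verbose:
                util.log('Connection closed in LoadGenerator:%s' % cmd)
            self.consistency = False
            return False
        return True

process() would then reduce to a handful of _run_command calls plus the CRC consistency check.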
Example #48
0
def perform_action(action_parameters, time_limit):
    action_parameters[ACTION_TIME_LIMIT] = time_limit
    if ACTION_UUID not in action_parameters:
        util.log('action.perform_action - %s - create' %
                 action_parameters[ACTION_NAME])
        action_parameters[ACTION_UUID] = storage.action_on_create(
            action_parameters[ACTION_NAME])
    util.log('action.perform_action - %s - start' %
             action_parameters[ACTION_NAME])
    storage.action_on_start(action_parameters[ACTION_UUID])

    action_fns = {
        ACTION_NAME_TEST: test,
        ACTION_NAME_DATASET_PRODUCE: dataset_producer.produce_dataset,
        ACTION_NAME_DATASET_PRODUCE_RECORD:
        dataset_producer.produce_dataset_record,
        ACTION_NAME_DELETE_DATASET_RECORD_WRITERS:
        storage.finish_delete_dataset_record_writers,
        ACTION_NAME_DATASET_ZIP: dataset_zipper.zip_dataset,
        ACTION_NAME_DATASET_ZIP_PARTITION:
        dataset_zipper.zip_dataset_partition,
        ACTION_NAME_DELETE_DATASET: storage.finish_delete_dataset,
        ACTION_NAME_DELETE_MODEL: storage.finish_delete_model,
        ACTION_NAME_DELETE_VIDEO: storage.finish_delete_video,
        ACTION_NAME_EXTRACT_SUMMARY_IMAGES:
        model_trainer.extract_summary_images,
        ACTION_NAME_FRAME_EXTRACTION: frame_extractor.extract_frames,
        ACTION_NAME_TRACKING: tracking.start_tracking,
    }
    action_fn = action_fns.get(action_parameters[ACTION_NAME], None)
    if action_fn is not None:
        try:
            action_fn(action_parameters)
        except Stop as e:
            pass
        except:
            util.log(
                'action.perform_action - %s exception!!! action_parameters: %s traceback: %s'
                % (action_parameters[ACTION_NAME], str(action_parameters),
                   traceback.format_exc().replace('\n', ' ... ')))
    else:
        util.log('action.perform_action - %s - action_fn is None' %
                 action_parameters[ACTION_NAME])

    util.log('action.perform_action - %s - stop' %
             action_parameters[ACTION_NAME])
    storage.action_on_stop(action_parameters[ACTION_UUID])
    if ACTION_RETRIGGERED not in action_parameters:
        util.log('action.perform_action - %s - destroy' %
                 action_parameters[ACTION_NAME])
        storage.action_on_destroy(action_parameters[ACTION_UUID])
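Stop and the action functions are defined elsewhere; the dispatch table only requires that each accept the action_parameters dict. A hypothetical action under that assumption (the constant value and the time-limit type are guesses, not the module's real definitions):

from datetime import datetime

ACTION_TIME_LIMIT = 'time_limit'  # assumed key, mirroring the constants above

class Stop(Exception):
    # Sketch: raised by an action_fn to end early; perform_action above
    # swallows it without logging a traceback.
    pass

def test(action_parameters):
    # Hypothetical action body: stop cleanly once the time limit passes.
    if datetime.now() >= action_parameters[ACTION_TIME_LIMIT]:
        raise Stop()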
Example #49
0
 def _wait_for_last_ACK(self):
   while self.sender_base < self.next_sequence_number-1:
     util.log("Waiting for last ACK from receiver with sequence # "
              + str(int(self.next_sequence_number-1)) + ".")
     time.sleep(1)
Example #50
0
    def process(self):
        if self.op_rotate == 'write':
            self.op_rotate = 'read'
        elif self.op_rotate == 'read':
            self.op_rotate = 'both'
        elif self.op_rotate == 'both':
            self.op_rotate = 'write'

        rqst = self.arcci.create_request()
        if rqst == None:
            return False

        cmd = ''  # defined up front so the except handler below can log it
        try:
            # Make request and command(s)
            if self.pattern == 'singlekey':
                rand = random.random() * 10000

                if self.op_rotate == 'read':
                    cmd = 'get %s_%d' % (self.key, rand)
                else:
                    cmd = 'set %s_%d %s' % (self.key, rand, rand)

                self.arcci.append_command(rqst, cmd)

            elif self.pattern == 'range-multikey':
                kv = ''
                for i in xrange(10):
                    rand = random.random() * 10000
                    if self.op_rotate == 'read':
                        kv += '%s_%d ' % (self.key, rand)
                    else:
                        kv += '%s_%d %s ' % (self.key, rand, rand)

                if self.op_rotate == 'read':
                    self.arcci.append_command(rqst, 'mget %s' % kv.strip())
                else:
                    self.arcci.append_command(rqst, 'mset %s' % kv.strip())

            elif self.pattern == 'range-singlekey':
                kv = ''
                rand = random.random() * 10000
                for i in xrange(10):
                    if self.op_rotate == 'read':
                        kv += '%s_%d ' % (self.key, rand)
                    else:
                        kv += '%s_%d %s ' % (self.key, rand, rand)

                if self.op_rotate == 'read':
                    self.arcci.append_command(rqst, 'mget %s' % kv.strip())
                else:
                    self.arcci.append_command(rqst, 'mset %s' % kv.strip())

            elif self.pattern == 'pipeline-singlekey':
                rand = random.random() * 10000
                for i in xrange(10):

                    if self.op_rotate == 'read':
                        cmd = 'get %s_%d' % (self.key, rand)
                    elif self.op_rotate == 'write':
                        cmd = 'set %s_%d %s' % (self.key, rand, rand)
                    elif self.op_rotate == 'both':
                        if i % 2:
                            cmd = 'get %s_%d' % (self.key, rand)
                        else:
                            cmd = 'set %s_%d %s' % (self.key, rand, rand)

                    self.arcci.append_command(rqst, cmd)

            elif self.pattern == 'pipeline-multikey':
                for i in xrange(10):
                    rand = random.random() * 10000

                    if self.op_rotate == 'read':
                        cmd = 'get %s_%d' % (self.key, rand)
                    elif self.op_rotate == 'write':
                        cmd = 'set %s_%d %s' % (self.key, rand, rand)
                    elif self.op_rotate == 'both':
                        if i % 2:
                            cmd = 'get %s_%d' % (self.key, rand)
                        else:
                            cmd = 'set %s_%d %s' % (self.key, rand, rand)

                    self.arcci.append_command(rqst, cmd)

            # Send request
            ret = self.arcci.do_request(rqst, self.timeout)
            if ret != 0:
                self.err_cnt += 1
                return False

            # Receive reply
            be_errno, reply = self.arcci.get_reply(rqst)
            if be_errno < 0 or reply == None:
                self.err_cnt += 1
                return False

            # Handle result (reads return strings/arrays here, so only an
            # explicit error reply counts as a failure)
            if reply[0] == ARC_REPLY_ERROR:
                self.err_cnt += 1
                return False

        except:
            if self.verbose:
                util.log('Connection closed in LoadGenerator:%s, except' % cmd)
                util.log(sys.exc_info())
                exc_type, exc_value, exc_traceback = sys.exc_info()
                traceback.print_exception(exc_type,
                                          exc_value,
                                          exc_traceback,
                                          limit=3,
                                          file=sys.stdout)
            self.consistency = False
            return False

        return True
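The write/read/both rotation at the top of process() is a fixed three-state cycle; itertools.cycle expresses the same rotation without the if/elif chain. A sketch:

import itertools

# Each next() yields the op for one process() invocation, matching the
# write -> read -> both -> write order above.
op_rotation = itertools.cycle(['read', 'both', 'write'])
print(next(op_rotation), next(op_rotation), next(op_rotation), next(op_rotation))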
Example #51
0
def quantize_process(model):
    print(
        '------------------------------- accuracy before weight sharing ----------------------------------'
    )
    acc = util.validate(val_loader, model, args)
    util.log(f"{args.save_dir}/{args.log}",
             f"accuracy before weight sharing\t{acc}")

    print(
        '------------------------------- accuracy after weight sharing -------------------------------'
    )

    old_weight_list, new_weight_list, quantized_index_list, quantized_center_list = apply_weight_sharing(
        model, args.model_mode, args.bits)

    acc = util.validate(val_loader, model, args)
    util.save_checkpoint(
        {
            'state_dict': model.state_dict(),
            'best_prec1': acc,
        },
        True,
        filename=os.path.join(
            args.save_dir,
            'checkpoint_{}_alpha_{}.tar'.format('quantized', args.alpha)))

    util.log(f"{args.save_dir}/{args.log}",
             f"weight\t{args.save_dir}/{args.out_quantized_folder}")
    util.log(f"{args.save_dir}/{args.log}",
             f"model\t{args.save_dir}/model_quantized.ptmodel")
    util.log(f"{args.save_dir}/{args.log}",
             f"accuracy after weight sharing {args.bits}bits\t{acc}")

    util.layer2torch(f"{args.save_dir}/{args.out_quantized_folder}", model)
    util.save_parameters(f"{args.save_dir}/{args.out_quantized_folder}",
                         new_weight_list)

    print(
        '------------------------------- retraining -------------------------------------------'
    )

    util.quantized_retrain(model, args, quantized_index_list,
                           quantized_center_list, train_loader, val_loader)

    acc = util.validate(val_loader, model, args)
    util.save_checkpoint(
        {
            'state_dict': model.state_dict(),
            'best_prec1': acc,
        },
        True,
        filename=os.path.join(
            args.save_dir,
            'checkpoint_{}_alpha_{}.tar'.format('quantized_re', args.alpha)))

    util.layer2torch(f"{args.save_dir}/{args.out_quantized_re_folder}", model)

    util.log(f"{args.save_dir}/{args.log}",
             f"weight:{args.save_dir}/{args.out_quantized_re_folder}")
    util.log(
        f"{args.save_dir}/{args.log}",
        f"model:{args.save_dir}/model_quantized_bit{args.bits}_retrain{args.reepochs}.ptmodel"
    )
    util.log(f"{args.save_dir}/{args.log}",
             f"acc after qauntize and retrain\t{acc}")

    weight_list = util.parameters2list(model.children())
    util.save_parameters(f"{args.save_dir}/{args.out_quantized_re_folder}",
                         weight_list)
    return model
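apply_weight_sharing itself is not shown. The idea is k-means weight sharing from Deep Compression: cluster a layer's nonzero weights into 2**bits centroids and snap every weight to its centroid. A standalone sketch for a single NumPy weight array (the real function returns index and center lists for retraining and operates on the whole model):

import numpy as np
from sklearn.cluster import KMeans

def share_weights(weight, bits=5):
    # Sketch only; assumes the layer has more nonzero weights than clusters.
    shape = weight.shape
    flat = weight.flatten()  # flatten() copies, so weight is untouched
    nz = np.flatnonzero(flat)
    km = KMeans(n_clusters=2 ** bits, n_init=1).fit(flat[nz].reshape(-1, 1))
    flat[nz] = km.cluster_centers_.flatten()[km.labels_]
    return flat.reshape(shape)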
Example #52
0
  def handle_arrival_msg(self):
    msg = self.network_layer.recv()
    msg_data = util.extract_data(msg)

    if(msg_data.is_corrupt):
      if(self.is_receiver):
        if self.expected_sequence_number == 0:
          util.log("Packet received is corrupted. " + self.NO_PREV_ACK_MSG)
          return
        self.network_layer.send(self.receiver_last_ack)
        util.log("Received corrupted data. Resending ACK: "
                 + util.pkt_to_string(util.extract_data(self.receiver_last_ack)))
      return

    # If an ACK message, assume it's for the sender
    if msg_data.msg_type == config.MSG_TYPE_ACK:
      self.sender_lock.acquire()
      self.sender_base = msg_data.seq_num + 1
      if(self.sender_base == self.next_sequence_number):
        util.log("Received ACK with seq # matching the end of the window: "
                 + util.pkt_to_string(msg_data) + ". Cancelling timer.")
        self.timer.cancel()
      else:
        util.log("Received ACK: " + util.pkt_to_string(msg_data)
                 + ". There are messages in-flight. Restarting the timer.")
        if self.timer.is_alive(): self.timer.cancel()
        self.set_timer()
        self.timer.start()
      self.sender_lock.release()
    # If a DATA message, assume it's for the receiver
    else:
      assert msg_data.msg_type == config.MSG_TYPE_DATA
      util.log("Received DATA: " + util.pkt_to_string(msg_data))
      if msg_data.seq_num == self.expected_sequence_number:
        self.msg_handler(msg_data.payload)
        ack_pkt = util.make_packet(b'', config.MSG_TYPE_ACK, self.expected_sequence_number)
        self.network_layer.send(ack_pkt)
        self.receiver_last_ack = ack_pkt
        self.expected_sequence_number += 1
        util.log("Sent ACK: " + util.pkt_to_string(util.extract_data(ack_pkt)))
      else:
        if self.expected_sequence_number == 0:
          util.log("Packet received is out of order. " + self.NO_PREV_ACK_MSG)
          return
        util.log("DATA message had unexpected sequence #"
                 + str(int(msg_data.seq_num)) + ". Resending ACK message with sequence # "
                 + str(int(self.expected_sequence_number-1)) + ".")
        self.network_layer.send(self.receiver_last_ack)
    return
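The handler depends on util.make_packet and util.extract_data, whose wire format is not shown. A sketch under an assumed layout of msg_type (1 byte) | seq_num (4 bytes) | checksum (2 bytes) | payload:

import collections
import struct

MSG_TYPE_ACK, MSG_TYPE_DATA = 0, 1  # assumed values for the config constants

Packet = collections.namedtuple('Packet', 'msg_type seq_num payload is_corrupt')

def checksum(blob):
    return sum(bytearray(blob)) & 0xFFFF

def make_packet(payload, msg_type, seq_num):
    header = struct.pack('!BI', msg_type, seq_num)
    return header + struct.pack('!H', checksum(header + payload)) + payload

def extract_data(pkt):
    msg_type, seq_num, cksum = struct.unpack_from('!BIH', pkt, 0)
    payload = pkt[7:]
    return Packet(msg_type, seq_num, payload,
                  checksum(pkt[:5] + payload) != cksum)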
Example #53
0
    callbacks = [earlystopping]

    model.fit_generator(generator=train_loader,
                        validation_data=val_loader,
                        epochs=epochs,
                        callbacks=callbacks,
                        max_queue_size=max_queue_size,
                        use_multiprocessing=use_multiprocessing,
                        workers=workers)

    # only save final model after earlystopping, due to issue https://github.com/keras-team/keras/issues/11101
    model.save(model_params)


if __name__ == '__main__':
    util.log("Training...")
    config = ConfigParser()
    config.read('./data/input/config.INI')

    parser = argparse.ArgumentParser(
        description='Bundle line separated corpora.')

    parser.add_argument(
        '--sequence_length',
        type=int,
        default=config.getint('TRAINING', 'sequence_length'),
        help='The (maximum) sequence length of one input text (padded).')
    parser.add_argument('--embedding_dim_target',
                        type=int,
                        default=config.getint('TRAINING', 'embedding_dim_target'),
                        help='Word vector dimension of the target language.')
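For reference, a plausible definition of the earlystopping callback used above; the monitor and patience values are assumptions, and the manual model.save afterwards works around the Keras issue referenced in the comment:

from tensorflow.keras.callbacks import EarlyStopping

earlystopping = EarlyStopping(monitor='val_loss', patience=3,
                              restore_best_weights=True)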
Example #54
0
 def shutdown(self):
   if not self.is_receiver: self._wait_for_last_ACK()
   if self.timer.is_alive(): self.timer.cancel()
   util.log("Connection shutting down...")
   self.network_layer.shutdown()
Example #55
0
def initialize_starting_up_smr_before_redis( cluster, verbose=2, conf=None ):
    if conf is None:
        conf = {'smr_log_delete_delay':86400,
                'cm_context':''}
    if 'smr_log_delete_delay' not in conf:
        conf['smr_log_delete_delay'] = 86400
    if 'cm_context' not in conf:
        conf['cm_context'] = ''

    if testbase.cleanup_zookeeper_root() != 0:
        util.log('failed to cleanup_zookeeper_root')
        return -1

    if testbase.cleanup_processes() != 0:
        util.log('failed to cleanup_test_environment')
        return -1

    for server in cluster['servers']:
        if testbase.cleanup_pgs_log_and_ckpt( cluster['cluster_name'], server ) != 0:
            util.log( 'failed to cleanup_pgs_data' )
            return -1

    for server in cluster['servers']:
        if testbase.request_to_start_cm( server['id'], server['cm_port'], conf['cm_context'] ) != 0:
            util.log('failed to request_to_start_cm')
            return -1

    if testbase.initialize_cluster( cluster ) != 0:
        util.log('failed to setup_znodes')
        return -1

    for server in cluster['servers']:
        if testbase.request_to_start_smr( server, verbose=verbose, log_delete_delay=conf['smr_log_delete_delay'] ) != 0:
            return -1

    for server in cluster['servers']:
        if testbase.request_to_start_redis( server, check=False ) != 0:
            return -1

    for server in cluster['servers']:
        if testbase.wait_until_finished_to_set_up_role( server ) != 0:
            return -1

    for server in cluster['servers']:
        if testbase.request_to_start_gateway( cluster['cluster_name'], server, cluster['servers'][0] ) != 0:
            util.log('failed to request_to_start_gateway')
            return -1

    return 0
Example #56
0
def pruning_process(model):

    print(
        "------------------------- Before pruning --------------------------------"
    )
    util.print_nonzeros(model, f"{args.save_dir}/{args.log}")
    accuracy = util.validate(val_loader, model, args)

    print(
        "------------------------- pruning CNN--------------------------------------"
    )
    model.prune_by_percentile(['conv1'], q=100 - 58.0)
    model.prune_by_percentile(['conv2'], q=100 - 22.0)
    model.prune_by_percentile(['conv3'], q=100 - 34.0)
    model.prune_by_percentile(['conv4'], q=100 - 36.0)
    model.prune_by_percentile(['conv5'], q=100 - 53.0)
    model.prune_by_percentile(['conv6'], q=100 - 24.0)
    model.prune_by_percentile(['conv7'], q=100 - 42.0)
    model.prune_by_percentile(['conv8'], q=100 - 32.0)
    model.prune_by_percentile(['conv9'], q=100 - 27.0)
    model.prune_by_percentile(['conv10'], q=100 - 34.0)
    model.prune_by_percentile(['conv11'], q=100 - 35.0)
    model.prune_by_percentile(['conv12'], q=100 - 29.0)
    model.prune_by_percentile(['conv13'], q=100 - 36.0)
    print(
        "------------------------------- After prune CNN ----------------------------"
    )
    util.print_nonzeros(model, f"{args.save_dir}/{args.log}")
    prec1 = util.validate(val_loader, model, args)

    util.save_checkpoint(
        {
            'state_dict': model.state_dict(),
            'best_prec1': prec1,
        },
        True,
        filename=os.path.join(
            args.save_dir,
            'checkpoint_{}_alpha_{}.tar'.format('pruned', args.alpha)))
    util.log(f"{args.save_dir}/{args.log}",
             f"weight\t{args.save_dir}/{args.out_pruned_folder}")
    util.log(f"{args.save_dir}/{args.log}",
             f"model\t{args.save_dir}/model_pruned.ptmodel")
    util.log(f"{args.save_dir}/{args.log}", f"prune acc\t{prec1}")

    util.layer2torch(f"{args.save_dir}/{args.out_pruned_folder}", model)
    weight_list = util.parameters2list(model.children())
    util.save_parameters(f"{args.save_dir}/{args.out_pruned_folder}",
                         weight_list)

    print(
        "------------------------- start retrain after prune CNN----------------------------"
    )
    util.initial_train(model, args, train_loader, val_loader, 'prune_re')

    print(
        "------------------------- After Retraining -----------------------------"
    )
    util.print_nonzeros(model, f"{args.save_dir}/{args.log}")
    accuracy = util.validate(val_loader, model, args)

    util.log(f"{args.save_dir}/{args.log}",
             f"weight\t{args.save_dir}/{args.out_pruned_re_folder}")
    util.log(
        f"{args.save_dir}/{args.log}",
        f"model\t{args.save_dir}/model_prune_retrain_{args.reepochs}.ptmodel")
    util.log(f"{args.save_dir}/{args.log}",
             f"prune and retrain acc\t{accuracy}")

    util.layer2torch(f"{args.save_dir}/{args.out_pruned_re_folder}", model)
    weight_list = util.parameters2list(model.children())
    util.save_parameters(f"{args.save_dir}/{args.out_pruned_re_folder}",
                         weight_list)

    return model
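prune_by_percentile lives on the model class and is not shown; the per-layer percentages appear to follow the VGG-16 pruning rates reported in the Deep Compression paper. A single-layer sketch of percentile-based magnitude pruning (the real method presumably also keeps the mask so pruned weights stay zero during retraining):

import numpy as np
import torch

def prune_by_percentile(module, q):
    # Sketch only: zero out the q percent smallest-magnitude weights of one
    # layer, measuring the threshold over the currently nonzero weights.
    w = module.weight.data.cpu().numpy()
    threshold = np.percentile(np.abs(w[w != 0]), q)
    mask = np.abs(w) >= threshold
    module.weight.data = torch.from_numpy(w * mask).to(module.weight.device)
    return mask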
Example #57
0
def eatFood(config):
    log("fazendo eatFood")
    pyautogui.rightClick(config.foodPos.x, config.foodPos.y)
Example #58
0
def finalize( cluster ):
    for server in cluster['servers']:
        if testbase.kill_all_processes( server ) != 0:
            util.log('failed to kill_all_processes')
            return -1
    return 0
Example #59
0
    def __call__(self, environ, start_response):

        #        util.log("SCRIPT_NAME='%s', PATH_INFO='%s'" % (environ.get("SCRIPT_NAME"), environ.get("PATH_INFO")))

        # We unquote PATH_INFO here, although this should already be done by
        # the server.
        path = urllib.unquote(environ["PATH_INFO"])
        # issue 22: Pylons sends root as u'/'
        if isinstance(path, unicode):
            util.log("Got unicode PATH_INFO: %r" % path)
            path = path.encode("utf8")

        # Always adding these values to environ:
        environ["wsgidav.config"] = self.config
        environ["wsgidav.provider"] = None
        environ["wsgidav.verbose"] = self._verbose

        ## Find DAV provider that matches the share

        # sorting share list by reverse length
        shareList = self.providerMap.keys()
        shareList.sort(key=len, reverse=True)

        share = None
        for r in shareList:
            # @@: Case sensitivity should be an option of some sort here;
            #     os.path.normpath might give the preferred case for a filename.
            if r == "/":
                share = r
                break
            elif path.upper() == r.upper() or path.upper().startswith(
                    r.upper() + "/"):
                share = r
                break

        provider = self.providerMap.get(share)

        # Note: we call the next app, even if provider is None, because OPTIONS
        #       must still be handled.
        #       All other requests will result in '404 Not Found'
        environ["wsgidav.provider"] = provider

        # TODO: test with multi-level realms: 'aa/bb'
        # TODO: test security: url contains '..'

        # Transform SCRIPT_NAME and PATH_INFO
        # (Since path and share are unquoted, this also fixes quoted values.)
        if share == "/" or not share:
            environ["PATH_INFO"] = path
        else:
            environ["SCRIPT_NAME"] += share
            environ["PATH_INFO"] = path[len(share):]
#        util.log("--> SCRIPT_NAME='%s', PATH_INFO='%s'" % (environ.get("SCRIPT_NAME"), environ.get("PATH_INFO")))

        assert isinstance(path, str)
        # See http://mail.python.org/pipermail/web-sig/2007-January/002475.html
        # for some clarification about SCRIPT_NAME/PATH_INFO format
        # SCRIPT_NAME starts with '/' or is empty
        assert environ["SCRIPT_NAME"] == "" or environ[
            "SCRIPT_NAME"].startswith("/")
        # SCRIPT_NAME must not have a trailing '/'
        assert environ["SCRIPT_NAME"] in (
            "", "/") or not environ["SCRIPT_NAME"].endswith("/")
        # PATH_INFO starts with '/'
        assert environ["PATH_INFO"] == "" or environ["PATH_INFO"].startswith(
            "/")

        start_time = time.time()

        def _start_response_wrapper(status, response_headers, exc_info=None):
            # Postprocess response headers
            headerDict = {}
            for header, value in response_headers:
                if header.lower() in headerDict:
                    util.warn("Duplicate header in response: %s" % header)
                headerDict[header.lower()] = value

            # Check if we should close the connection after this request.
            # http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.4
            forceCloseConnection = False
            currentContentLength = headerDict.get("content-length")
            statusCode = int(status.split(" ", 1)[0])
            contentLengthRequired = (environ["REQUEST_METHOD"] != "HEAD"
                                     and statusCode >= 200
                                     and statusCode not in (204, 304))
            #            print environ["REQUEST_METHOD"], statusCode, contentLengthRequired
            if contentLengthRequired and currentContentLength in (None, ""):
                # A typical case: a GET request on a virtual resource, for which
                # the provider doesn't know the length
                util.warn(
                    "Missing required Content-Length header in %s-response: closing connection"
                    % statusCode)
                forceCloseConnection = True
            elif not isinstance(currentContentLength, str):
                util.warn(
                    "Invalid Content-Length header in response (%r): closing connection"
                    % headerDict.get("content-length"))
                forceCloseConnection = True

            # HOTFIX for Vista and Windows 7 (issue 13, issue 23)
            # It seems that we must read *all* of the request body, otherwise
            # clients may miss the response.
            # For example Vista MiniRedir didn't understand a 401 response,
            # when trying an anonymous PUT of big files. As a consequence, it
            # doesn't retry with credentials and the file copy fails.
            # (XP is fine however).
            util.readAndDiscardInput(environ)

            # Make sure the socket is not reused, unless we are 100% sure all
            # current input was consumed
            if (util.getContentLength(environ) != 0
                    and not environ.get("wsgidav.all_input_read")):
                util.warn(
                    "Input stream not completely consumed: closing connection")
                forceCloseConnection = True

            if forceCloseConnection and headerDict.get(
                    "connection") != "close":
                util.warn("Adding 'Connection: close' header")
                response_headers.append(("Connection", "close"))

            # Log request
            if self._verbose >= 1:
                userInfo = environ.get("http_authenticator.username")
                if not userInfo:
                    userInfo = "(anonymous)"
                threadInfo = ""
                if self._verbose >= 1:
                    threadInfo = "<%s> " % threading._get_ident()
                extra = []
                if "HTTP_DESTINATION" in environ:
                    extra.append('dest="%s"' % environ.get("HTTP_DESTINATION"))
                if environ.get("CONTENT_LENGTH", "") != "":
                    extra.append("length=%s" % environ.get("CONTENT_LENGTH"))
                if "HTTP_DEPTH" in environ:
                    extra.append("depth=%s" % environ.get("HTTP_DEPTH"))
                if "HTTP_RANGE" in environ:
                    extra.append("range=%s" % environ.get("HTTP_RANGE"))
                if "HTTP_OVERWRITE" in environ:
                    extra.append("overwrite=%s" %
                                 environ.get("HTTP_OVERWRITE"))
                if self._verbose >= 1 and "HTTP_EXPECT" in environ:
                    extra.append('expect="%s"' % environ.get("HTTP_EXPECT"))
                if self._verbose >= 2 and "HTTP_CONNECTION" in environ:
                    extra.append('connection="%s"' %
                                 environ.get("HTTP_CONNECTION"))
                if self._verbose >= 2 and "HTTP_USER_AGENT" in environ:
                    extra.append('agent="%s"' % environ.get("HTTP_USER_AGENT"))
                if self._verbose >= 2 and "HTTP_TRANSFER_ENCODING" in environ:
                    extra.append('transfer-enc=%s' %
                                 environ.get("HTTP_TRANSFER_ENCODING"))
                if self._verbose >= 1:
                    extra.append('elap=%.3fsec' % (time.time() - start_time))
                extra = ", ".join(extra)

                #               This is the CherryPy format:
                #                127.0.0.1 - - [08/Jul/2009:17:25:23] "GET /loginPrompt?redirect=/renderActionList%3Frelation%3Dpersonal%26key%3D%26filter%3DprivateSchedule&reason=0 HTTP/1.1" 200 1944 "http://127.0.0.1:8002/command?id=CMD_Schedule" "Mozilla/5.0 (Windows; U; Windows NT 6.0; de; rv:1.9.1) Gecko/20090624 Firefox/3.5"
                #                print >>sys.stderr, '%s - %s - [%s] "%s" %s -> %s' % (
                print >> sys.stdout, '%s - %s - [%s] "%s" %s -> %s' % (
                    threadInfo + environ.get("REMOTE_ADDR", ""),
                    userInfo,
                    util.getLogTime(),
                    environ.get("REQUEST_METHOD") + " " +
                    environ.get("PATH_INFO", ""),
                    extra,
                    status,
                    #                                        response_headers.get(""), # response Content-Length
                    # referer
                )

            return start_response(status, response_headers, exc_info)

        # Call next middleware
        for v in self._application(environ, _start_response_wrapper):
            yield v
        return
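The share lookup above walks candidate mount points longest-first so that nested shares win, with '/' as the catch-all. The same resolution logic as a standalone sketch:

def resolve_share(provider_map, path):
    # Sketch only: return (share, provider) for the longest share that
    # prefix-matches path, case-insensitively, mirroring the loop above.
    for share in sorted(provider_map, key=len, reverse=True):
        if share == "/":
            return share, provider_map[share]
        if (path.upper() == share.upper()
                or path.upper().startswith(share.upper() + "/")):
            return share, provider_map[share]
    return None, None

print(resolve_share({"/": "root", "/docs": "docs"}, "/docs/readme.txt"))
# -> ('/docs', 'docs')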
Example #60
0
    def crawl_room(self, room_type, log_file):

        p_daBARs = dict()
        p_CRP1F1 = dict()
        p_FTDAY = dict()

        try:
            daBARs = room_type.find_element_by_xpath('.//div[@data-filter-criterion = "daBARs"]')
            accoms = daBARs.find_element_by_class_name('hotelAccomodations')
            p_daBARs = self.parse_accom(accoms)
            log(log_file, '{} for {} priced at {}. Checkin date:{} Checkout date:{}.'.format('Best Available Rate', self.get_room_name(room_type), p_daBARs, self.start_date, self.end_date))
        except NoSuchElementException:
            log(log_file, 'WARN: {} for {} price not found. Checkin date:{} Checkout date:{}.'.format('Best Available Rate', self.get_room_name(room_type), self.start_date, self.end_date))

        try:
            CRP1F1 = room_type.find_element_by_xpath('.//div[@data-filter-criterion = "CRP1F1"]')
            accoms = CRP1F1.find_element_by_class_name('hotelAccomodations')
            p_CRP1F1 = self.parse_accom(accoms)
            log(log_file, '{} for {} priced at {}. Checkin date:{} Checkout date:{}.'.format('Step Into Summer...', self.get_room_name(room_type), p_CRP1F1, self.start_date, self.end_date))
        except NoSuchElementException:
            log(log_file, 'WARN: {} for {} price not found. Checkin date:{} Checkout date:{}.'.format('Step Into Summer 1 Night', self.get_room_name(room_type), self.start_date, self.end_date))

        try:
            FTDAY = room_type.find_element_by_xpath('.//div[@data-filter-criterion = "14DAY"]')
            accoms = FTDAY.find_element_by_class_name('hotelAccomodations')
            p_FTDAY = self.parse_accom(accoms)
            log(log_file, '{} for {} priced at {}. Checkin date:{} Checkout date:{}.'.format('Advance Purchase Promotion', self.get_room_name(room_type), p_FTDAY, self.start_date, self.end_date))
        except NoSuchElementException:
            log(log_file, 'WARN: {} for {} price not found. Checkin date:{} Checkout date:{}.'.format('Advance Purchase Promotion', self.get_room_name(room_type), self.start_date, self.end_date))

        return {
            'price_bar': p_daBARs,
            'price_summer': p_CRP1F1,
            'price_adv': p_FTDAY
            }
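The three try/except blocks above differ only in the rate-plan criterion and label. A table-driven sketch using the same selenium calls (rate codes and labels copied from the code above):

from selenium.common.exceptions import NoSuchElementException

RATE_PLANS = [
    ('daBARs', 'Best Available Rate',        'price_bar'),
    ('CRP1F1', 'Step Into Summer...',        'price_summer'),
    ('14DAY',  'Advance Purchase Promotion', 'price_adv'),
]

def crawl_room(self, room_type, log_file):
    prices = {}
    for criterion, label, key in RATE_PLANS:
        prices[key] = dict()
        try:
            block = room_type.find_element_by_xpath(
                './/div[@data-filter-criterion = "%s"]' % criterion)
            accoms = block.find_element_by_class_name('hotelAccomodations')
            prices[key] = self.parse_accom(accoms)
            log(log_file, '{} for {} priced at {}. Checkin date:{} Checkout date:{}.'.format(
                label, self.get_room_name(room_type), prices[key],
                self.start_date, self.end_date))
        except NoSuchElementException:
            log(log_file, 'WARN: {} for {} price not found. Checkin date:{} Checkout date:{}.'.format(
                label, self.get_room_name(room_type), self.start_date, self.end_date))
    return prices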