Example #1
def filter_exclude_include(src_list):
    info(u"Applying --exclude/--include")
    cfg = Config()
    exclude_list = FileDict(ignore_case = False)
    for file in src_list.keys():
        debug(u"CHECK: %s" % file)
        excluded = False
        for r in cfg.exclude:
            if r.search(file):
                excluded = True
                debug(u"EXCL-MATCH: '%s'" % (cfg.debug_exclude[r]))
                break
        if excluded:
            ## No need to check for --include if not excluded
            for r in cfg.include:
                if r.search(file):
                    excluded = False
                    debug(u"INCL-MATCH: '%s'" % (cfg.debug_include[r]))
                    break
        if excluded:
            ## Still excluded - ok, action it
            debug(u"EXCLUDE: %s" % file)
            exclude_list[file] = src_list[file]
            del src_list[file]
            continue
        else:
            debug(u"PASS: %r" % (file))
    return src_list, exclude_list
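A minimal standalone sketch of the same precedence rule (--include only rescues files that an --exclude pattern already matched), assuming the exclude/include lists hold compiled regular expressions as the r.search() calls above imply; all names here are illustrative:

import re

exclude = [re.compile(r'\.tmp$')]
include = [re.compile(r'keep\.tmp$')]

def is_excluded(path):
    # --include is only consulted for files an --exclude pattern matched
    if any(r.search(path) for r in exclude):
        return not any(r.search(path) for r in include)
    return False

print(is_excluded('a.tmp'))     # True  (excluded)
print(is_excluded('keep.tmp'))  # False (re-included)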
Example #2
    def jar(self):
        """Performs the 'jar' command."""
        class_name = getattr(self.flags, "class")
        if (class_name is None) and (len(self.args) > 0):
            class_name = self.pop_args_head()
        assert (class_name is not None), ("No class name specified with [--class=]<class>.")

        lib_jars = []
        if self.flags.jars is not None:
            lib_jars.extend(self.flags.jars)
        classpath = list(self.express.get_classpath(lib_jars=lib_jars))

        java_opts = []
        if self.flags.java_opts is not None:
            java_opts = [self.flags.java_opts]

        user_args = list(self.args)
        logging.info("Running java class %r with parameters: %r", class_name, user_args)

        cmd = [
            "java",
            # This property is only needed in kiji-schema v1.1 :
            "-Dorg.kiji.schema.impl.AvroCellEncoder.SCHEMA_VALIDATION=DISABLED",
        ] + java_opts + [
            "-classpath", ":".join(classpath),
            class_name,
        ] + user_args

        logging.debug("Running command:\n%s\n", " \\\n\t".join(map(repr, cmd)))
        return subprocess.call(cmd)
Example #3
  def _UploadStatus(self, version, status, message=None, fail_if_exists=False,
                    dashboard_url=None):
    """Upload build status to Google Storage.

    Args:
      version: Version number to use. Must be a string.
      status: Status string.
      message: A failures_lib.BuildFailureMessage object with details
               of builder failure, or None (default).
      fail_if_exists: If set, fail if the status already exists.
      dashboard_url: Optional url linking to builder dashboard for this build.
    """
    data = BuilderStatus(status, message, dashboard_url).AsPickledDict()

    # This HTTP header tells Google Storage to return the PreconditionFailed
    # error message if the file already exists.
    gs_version = 0 if fail_if_exists else None

    logging.info('Recording status %s for %s', status, self.build_names)
    for build_name in self.build_names:
      url = BuildSpecsManager._GetStatusUrl(build_name, version)

      # Do the actual upload.
      ctx = gs.GSContext(dry_run=self.dry_run)
      ctx.Copy('-', url, input=data, version=gs_version)
Example #4
	def post(self):
		username = self.request.get('quotationrEmail')

		user = self.user_model.get_by_auth_id(username)
		if not user:
			logging.info('Could not find any user entry for username %s', username)
			self.response.out.write('fail:::cant find email')
			return

		user_id = user.get_id()
		token = self.user_model.create_signup_token(user_id)

		verification_url = self.uri_for('verification', type='p', user_id=user_id,
			signup_token=token, _full=True)

		
		logging.error(verification_url)
		
		mail.send_mail(sender="Quotationr <*****@*****.**>",
                to=user.email_address,
                subject="Reset Your Quotationr Password",
                body="Please click the following link to reset your Quotationr password:\n\n " + verification_url)
		
		#self.response.out.write('success:::' + user.email_address)
		self.response.out.write('success:::email sent')
Example #5
    def Adjustment(self):
        """ adjustment & and blunder removing

            :returns: adjusted coordinates or None
        """
        # adjustment loop
        last_res = None
        while True:
            res, blunder = self.g.adjust()
            if res is None or 'east' not in res[0] or 'north' not in res[0] or \
                              'elev' not in res[0]:
                # adjustment failed or too many blunders
                if last_res is not None:
                    logging.warning("blunders are not fully removed")
                    res = last_res
                else:
                    logging.error("adjustment failed")
                break
            elif blunder['std-residual'] < 1.0:
                logging.info("blunders removed")
                break
            else:
                logging.info("%s - %s observation removed" % (blunder['from'], blunder['to']))
                self.g.remove_observation(blunder['from'], blunder['to'])
                last_res = res
        return res
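The loop only relies on self.g exposing adjust() and remove_observation(). A hypothetical stub that satisfies that contract, useful for seeing the blunder-removal flow (names and values are illustrative, not from the original):

class FakeNetwork(object):
    def __init__(self):
        # worst blunder first; 'std-residual' drives the loop above
        self.blunders = [
            {'from': 'P1', 'to': 'P2', 'std-residual': 3.2},
            {'from': 'P2', 'to': 'P3', 'std-residual': 0.4},
        ]

    def adjust(self):
        # returns (coordinate list, worst remaining blunder)
        res = [{'east': 0.0, 'north': 0.0, 'elev': 0.0}]
        return res, self.blunders[0]

    def remove_observation(self, p_from, p_to):
        self.blunders.pop(0)

With self.g = FakeNetwork(), Adjustment() removes the 3.2-residual observation, then stops once the worst remaining residual drops below 1.0.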
Example #6
    def run(self, logger):
        """
        Run test case and gather results
        """
        if self.case_data.skip:
            logging.info("Skipping test case '%s'" % \
                             self.case_data.name)
            return

        logging.info("Running test case '%s' (%s)"
                     % (self.case_data.name,
                        self.case_data.methodname))
        starttime = time()
        try:
            rv = self.test_method(**self.case_data.args)
        except AssertionError, e:
            # The test failed.
            if len(e.args) > 1:
                self.results.append('message', e.args[0].encode("utf-8"))
                self.results.append_screenshot(e.args[1])
            else:
                self.results.append('message', e.args[0].encode("utf-8"))
                self.results.append_screenshot()
            self.results.append('stacktrace', traceback.format_exc())
            self.results['pass'] = 0
Example #7
  def PublishManifest(self, manifest, version, build_id=None):
    """Publishes the manifest as the manifest for the version to others.

    Args:
      manifest: Path to manifest file to publish.
      version: Manifest version string, e.g. 6102.0.0-rc4
      build_id: Optional integer giving build_id of the build that is
                publishing this manifest. If specified and non-negative,
                build_id will be included in the commit message.
    """
    # Note: This commit message is used by master.cfg for figuring out when to
    #       trigger slave builders.
    commit_message = 'Automatic: Start %s %s %s' % (self.build_names[0],
                                                    self.branch, version)
    if build_id is not None and build_id >= 0:
      commit_message += '\nCrOS-Build-Id: %s' % build_id

    logging.info('Publishing build spec for: %s', version)
    logging.info('Publishing with commit message: %s', commit_message)
    logging.debug('Manifest contents below.\n%s', osutils.ReadFile(manifest))

    # Copy the manifest into the manifest repository.
    spec_file = '%s.xml' % os.path.join(self.all_specs_dir, version)
    osutils.SafeMakedirs(os.path.dirname(spec_file))

    shutil.copyfile(manifest, spec_file)

    # Actually push the manifest.
    self.PushSpecChanges(commit_message)
Example #8
def handle(data, con, apikey=None):
  d = json.loads(data)

  handlers = {'import': importit, 'ping': ping,
      'listimported': listimported, 'slice': sliceit,
      'listdone': listdone, 'getdone': getdone,
      'importconfig': importconfig, 'listconfig': listconfigs,
      'listprogress': listprogress, 'getstats': getstats,
      'journal': getjournal, 'del': wipefile, 'wait': waitfor}

  hndlr = noop
  cmd = 'noop'
  if 'cmd' in d:
    if d['cmd'] in handlers:
      cmd = d['cmd']
      hndlr = handlers[cmd]

  logging.info('cmd: ' + cmd)

  if apikey is not None:
    if not ('key' in d and d['key'] == apikey):
      logging.info('authentication failed for "{}" key!'.format(
        d.get('key', '')))
      return json.dumps({'r': 'fail',
        'm': 'authentication failed. incorrect apikey'})

  try:
    r = hndlr(d, con)
    result = json.dumps(r)
  except Exception as e:
    logging.error(str(e))
    result = json.dumps({u'm':unicode(e), u'r':u'fail'})
  logaccess(len(data), len(result), unicode(cmd), con)

  return result
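A hypothetical round trip, assuming a DB connection `con` and the handler functions (ping and friends) plus logaccess are defined in the same module:

req = json.dumps({'cmd': 'ping', 'key': 'secret'})
print(handle(req, con, apikey='secret'))  # JSON result from ping()

req = json.dumps({'cmd': 'ping'})         # no key supplied
print(handle(req, con, apikey='secret'))  # {"r": "fail", "m": "authentication failed. incorrect apikey"}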
Example #9
  def test_restart(self):
    """test_restart tests that when starting a second vttablet with the same
    configuration as another one, it will kill the previous process
    and take over listening on the socket.

    If vttablet listens to other ports (like gRPC), this feature will
    break. We believe it is not widely used, so we're OK with this for now.
    (container based installations usually handle tablet restarts
    by using a different set of servers, and do not rely on this feature
    at all).
    """
    if environment.topo_server().flavor() != 'zookeeper':
      logging.info("Skipping this test in non-github tree")
      return
    if tablet_62344.grpc_enabled():
      logging.info("Skipping this test as second gRPC port interferes")
      return

    utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])

    # create the database so vttablets start, as it is serving
    tablet_62344.create_db('vt_test_keyspace')

    tablet_62344.init_tablet('master', 'test_keyspace', '0')
    proc1 = tablet_62344.start_vttablet()
    proc2 = tablet_62344.start_vttablet()
    for timeout in xrange(20):
      logging.debug("Sleeping waiting for first process to die")
      time.sleep(1.0)
      proc1.poll()
      if proc1.returncode is not None:
        break
    if proc1.returncode is None:
      self.fail("proc1 still running")
    tablet_62344.kill_vttablet()
Example #10
def slicethread(fname, oname, wname, cfg, jobid):
  retcode = "fail"
  try:
    con = sqlite3.connect('db.sqlite')
    con.row_factory = sqlite3.Row

    cfg = "config.ini" if cfg is None else cfg

    proc = subprocess.Popen(["slic3r",
      "--load", cfg,
      fname, "-o", wname+'.gcode'])
    con.execute('insert into journal(cmd, pid, action, status, timestamp) values(?,?,?,?,DateTime(\'now\'))',
      ('slice {} -c {}'.format(os.path.basename(fname),
                               os.path.basename(cfg)), proc.pid, 'start',
        0 if proc.returncode is None else 1 ))
    con.commit()
    retcode = proc.wait()
    con.execute('insert into journal(cmd, pid, action, status, timestamp) values(?,?,?,?,DateTime(\'now\'))',
      ('slice {} -c {}'.format(os.path.basename(fname),
                               os.path.basename(cfg)), proc.pid, 'stop',
        proc.returncode))
    con.commit()
    try:
      os.unlink(oname+'.gcode')
    except OSError:
      pass
    finally:
      try:
        os.rename(wname+'.gcode', oname+'.gcode')
      except Exception:
        logging.info( wname+'.gcode')
        logging.info( oname+'.gcode')
        pass
  finally:
    _work_done(jobid, val=retcode)
Example #11
def _work_reg():
  _task_lock.acquire()
  jid = _task_seq.next()
  _task_list[jid] = False
  logging.info(str(_task_list))
  _task_lock.release()
  return jid
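The module-level state _work_reg touches is not shown; a plausible (Python 2) setup consistent with the _task_seq.next() call above would be:

import itertools
import threading

_task_lock = threading.Lock()
_task_seq = itertools.count()  # .next() works on this under Python 2
_task_list = {}

Using `with _task_lock:` instead of the explicit acquire/release pair would also release the lock if the logging call raised.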
Example #12
    def PowerOn(self, request, context):
        logging.info("__INIT__PowerOn[VappVmServicer]")
        vapp_vm = VappVm(context)
        res = vapp_vm.power_on(request)
        logging.info("__DONE__PowerOn[VappVmServicer]")

        return res
Example #13
 def execute(self, email_models):
     logging.debug("In Destiny::execute()")
     if not email_models:
         return
     emails_id = []
     destinations = {}
     for destiny in self._plugins.keys():
         destinations.setdefault(destiny, email_models[-1].get(destiny))
         emails_id.append(email_models[-1].email_id())
     for email_model in email_models[:-1]:
         for destiny in self._plugins.keys():
             d_tables = destinations.get(destiny).get("tables")
             for d_table in d_tables:
                 for k, v in d_table.iteritems():
                     m_tables = email_model.get(destiny).get("tables")
                     for m_table in m_tables:
                         if k in m_table:
                             d_table.setdefault(k, []).extend(m_table[k])
         emails_id.append(email_model.email_id())
     for destiny, models in destinations.iteritems():
         for forward in self._plugins.get(destiny):
             try:
                 forward.execute(models)
             except Exception, e:
                 logging.error("!! Error-execute: %s" % (str(e),))
                 logging.info("Add emails in queure error: %s" % str(emails_id))
                 for email_id in emails_id:
                     self.add_email_error(email_id)
                 continue
Example #14
    def Read(self, request, context):
        logging.info("__INIT__Read[VappVmServicer]")
        vapp_vm = VappVm(context)
        res = vapp_vm.read(request)
        logging.info("__DONE__Read[VappVmServicer]")

        return res
Example #15
    def ModifyMemory(self, request, context):
        logging.info("__INIT__ModifyMemory[VappVmServicer]")
        vapp_vm = VappVm(context)
        res = vapp_vm.modify_memory(request)
        logging.info("__DONE__ModifyMemory[VappVmServicer]")

        return res
Example #16
    def read(self, request):
        logging.info("__INIT__read[VappVm]")
        res = vapp_vm_pb2.ReadVappVmResult()
        res.present = False
        org_resource = self.client.get_org()
        org = Org(self.client, resource=org_resource)
        try:
            vdc_resource = org.get_vdc(request.target_vdc)
            vdc = VDC(
                self.client, name=request.target_vdc, resource=vdc_resource)

            vapp_resource = vdc.get_vapp(request.target_vapp)
            vapp = VApp(
                self.client, name=request.target_vapp, resource=vapp_resource)
            read_vapp_vm_resp = vapp.get_vm(request.target_vm_name)
            vm = VM(client=self.client, href=None, resource=read_vapp_vm_resp)

            res.present = True
        except Exception as e:
            errmsg = '__ERROR_read[VappVm] failed for VappVm {0}. __ErrorMessage__ {1}'
            logging.warn(errmsg.format(request.target_vm_name, str(e)))

            return res

        logging.info("__DONE__read[VappVm]")

        return res
Example #17
    def Delete(self, request, context):
        logging.info("__INIT__Delete[VappVmServicer]")
        vapp_vm = VappVm(context)
        res = vapp_vm.delete(request)
        logging.info("__DONE__Delete[VappVmServicer]")

        return res
Example #18
    def pre(self, emulator=None):
        """
        _pre_

        Pre execution checks

        """
        if emulator is not None:
            return emulator.emulatePre(self.step)
        logging.info("Pre-executing CMSSW step")
        if hasattr(self.step.application.configuration, "configCacheUrl"):
            # means we have a configuration & tweak in the sandbox
            psetFile = self.step.application.command.configuration
            psetTweak = self.step.application.command.psetTweak
            self.stepSpace.getFromSandbox(psetFile)

            if psetTweak:
                self.stepSpace.getFromSandbox(psetTweak)

        if hasattr(self.step, "pileup"):
            self.stepSpace.getFromSandbox("pileupconf.json")

        # add in the scram env PSet manip script whatever happens
        self.step.runtime.scramPreScripts.append("SetupCMSSWPset")
        return None
Example #19
 def kill_process(self, process):
     """
     Kill the given process.
     """
     logging.info('killing %s', process)
     drone = self._get_drone_for_process(process)
     drone.queue_call('kill_process', process)
Example #20
def make_parser():
    """ Construct the command line parser """
    logging.info("Constructing parser")
    description = "Store and retrieve snippets of text"
    parser = argparse.ArgumentParser(description=description)

    subparsers = parser.add_subparsers(help="Available commands")

    # Subparser for the put command
    logging.debug("Constructing put subparser")
    put_parser = subparsers.add_parser("put", help="Store a snippet")
    put_parser.add_argument("name", help="The name of the snippet")
    put_parser.add_argument("snippet", help="The snippet text")
    put_parser.add_argument("filename", default="snippets.csv", nargs="?",
                            help="The snippet filename")
    put_parser.set_defaults(command="put")

    # Subparser for the get command
    logging.debug("Constructing get subparser")
    put_parser = subparsers.add_parser("get", help="Retrieve a snippet")
    put_parser.add_argument("name", help="The name of the snippet")
    put_parser.add_argument("filename", default="snippets.csv", nargs="?",
                            help="The snippet filename")
    put_parser.set_defaults(command="get")

    return parser
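For instance (values are illustrative):

parser = make_parser()
args = parser.parse_args(["put", "greeting", "Hello, world", "my.csv"])
# Namespace(command='put', name='greeting', snippet='Hello, world', filename='my.csv')
args = parser.parse_args(["get", "greeting"])
# Namespace(command='get', name='greeting', filename='snippets.csv')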
Example #21
    def compare(self, bag):
        if len(self.iplist) > 0 and (self.dev not in bag.keys() or len(bag[self.dev]) == 0):
            # Remove all IPs on this device
            logging.info(
                "Will remove all configured addresses on device %s", self.dev)
            self.delete("all")
            app = CsApache(self)
            app.remove()

        # This condition should not really happen but did :)
        # It means an apache file got orphaned after a guest network address
        # was deleted
        if len(self.iplist) == 0 and (self.dev not in bag.keys() or len(bag[self.dev]) == 0):
            app = CsApache(self)
            app.remove()

        for ip in self.iplist:
            found = False
            if self.dev in bag.keys():
                for address in bag[self.dev]:
                    self.setAddress(address)
                    if self.hasIP(ip):
                        found = True
                    if self.is_guest_gateway(address, ip):
                        found = True
            if not found:
                self.delete(ip)
Example #22
    def post(self):
        args = parser.parse_args()
        ip_address = request.remote_addr
        port = args['port']

        worker = Worker.query.filter_by(ip_address=ip_address, port=port).first()
        if not worker:
            logging.info("New worker connecting from {0}".format(ip_address))
            worker = Worker(hostname=args['hostname'],
                          ip_address=ip_address,
                          port=port,
                          status='enabled',
                          current_task=None,
                          log=None,
                          time_cost=None,
                          activity=None,
                          connection='online',
                          system=args['system'])
        else:
            worker.connection = 'online'
            worker.current_task = None

        db.session.add(worker)
        db.session.commit()

        return '', 204
Example #23
def move_data():
    db = DBConnection().db
       
    mobiles = ['18310505991', '13693675352', '13581731204']
    message = "数据库T_LOCATION已经完全转移到T_LOCATION_NEW,请及确认表信息的正确性和完整性。"
    #max_row = 1000000000
    max_row = 250000000
    begin_time = time.gmtime(time.time())
    for i in range(10000, max_row, 10000):
        sql = "INSERT INTO T_LOCATION_NEW" \
              " SELECT * FROM T_LOCATION WHERE id <=%d AND id > %d -10000" \
              " and (timestamp between 0 and 1448899200)" % (i, i)
        logging.info("exectue sql:%s", sql)
        
        n = db.execute(sql)
        #time.sleep(0.1)
        logging.info("last record row id = %s", n)
        if i == 240000000:
            for mobile in mobiles:
                SMSHelper.send(mobile, message)    
                print "send", mobile
    end_time = time.gmtime(time.time())
    L_bak = "alter table T_LOCATION rename  to T_LOCATION_bak"
    NEW_L = "alter table T_LOCATION_NEW rename  to T_LOCATION"
    
    for i in range(1, 5): 
        time.sleep(1)
        logging.info("Will rename table neame after %d second", 5-i)
    
    db.execute(L_bak)
    db.execute(NEW_L)
    logging.info("exchange tables T_LOCATION and T_LOCATION_NEW is accomplished ")
    logging.info("Move table data begin_time:%s, end_time:%s", begin_time, end_time)
Example #24
def read_dependency_xml(moduleName, config):
    qt_path = config.get('default', 'qt')
    xml_path = os.path.join(
        qt_path, 'lib/{}-android-dependencies.xml'.format(moduleName))
    if not os.path.exists(xml_path):
        log.info("module {} do not have xml {}".format(moduleName, xml_path))
        return

    tree = ElementTree.parse(xml_path)
    root = tree.getroot()
    lib_tag = root.find('dependencies/lib')

    name = "" if not lib_tag.attrib else lib_tag.attrib.get('name', "")

    if name != moduleName:
        raise Exception("moduleName({}) and name from xml({}) do not match".format(
            moduleName, name))

    deps_tag = lib_tag.find('depends')
    deps = list()

    for child in deps_tag:
        info = {
            "tag": child.tag
        }
        info.update(child.attrib)
        deps.append(info)

    return deps
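Judging from the find() calls above, the parser expects roughly this file shape (a reconstructed guess; the actual tag names inside <depends> are whatever Qt emits):

# Qt5Svg-android-dependencies.xml, illustrative:
#
# <rules>                              <!-- root tag is not checked -->
#   <dependencies>
#     <lib name="Qt5Svg">
#       <depends>
#         <lib file="libs/libQt5Widgets.so"/>
#         <jar file="jar/QtAndroid.jar"/>
#       </depends>
#     </lib>
#   </dependencies>
# </rules>
#
# read_dependency_xml('Qt5Svg', config) would then return:
# [{'tag': 'lib', 'file': 'libs/libQt5Widgets.so'},
#  {'tag': 'jar', 'file': 'jar/QtAndroid.jar'}]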
Example #25
def main():
    """
    SendDataClient class, acting as the flashshot service
    """

    log.init_log('./logs/send_data_client')

    # If the config file says this region needs an IP proxy, cancel the proxy IP
    # before uploading and restore it once the upload finishes. Also set uping to 1
    # so that checkIpProxy (which runs every five minutes) will not modify this IP;
    # it is set back to 0 after the upload.
    if config.NEED_PROXY:
        configFile = ConfigParser.ConfigParser()
        configFile.read(CONFIGFILE)
        configFile.set("info", "uping", 1)
        configFile.write(open(CONFIGFILE, "w"))
        logging.info('setProxy("0") ')
        # Turn off the local proxy IP before uploading the images
        ipProxy.setProxy("0")

    target_folder = sys.argv[1]
    target_filenames = get_file_list(target_folder)
    upload_files(target_folder, target_filenames)  
    
    # Re-enable the local proxy IP after uploading the images
    if config.NEED_PROXY:
        configFile = ConfigParser.ConfigParser()
        configFile.read(CONFIGFILE)
        ip1 = configFile.get("info", "ip1")
        configFile.set("info", "uping", 0)
        configFile.write(open(CONFIGFILE, "w"))
        enableProxyScript = "python ipProxy.py " + ip1
        os.popen(enableProxyScript)
        # ipProxy.setProxy(ip1)
        logging.info('setProxy ' + ip1)
Example #26
    def get(self, request, *args, **kwargs):
        if 'cname' in request.GET and request.GET['cname'] != "":
            #cname = request.GET['cname']
            cname = " and (Contact_FirstName like '%%"+request.GET['cname']+"%%' or Contact_LastName like '%%"+request.GET['cname']+"%%')"
        else:
            cname = ""

        if 'ccompany' in request.GET and request.GET['ccompany'] != "":
            ccompany = " and Contact_Company like '%%"+request.GET['ccompany']+"%%'"
        else:
            ccompany = ""
        
        if 'ctype' in request.GET and request.GET['ctype'] != "" and request.GET['ctype'] != "Contact Type":
            ctype = " and Contact_Type like '%%"+request.GET['ctype']+"%%'"
        else:
            ctype = ""
        #allcontacts = Tblcontact.objects.raw("select * from tblcontact where Contact_PK>0 "+cname+" "+cemail+" "+ctype+"")
        if 'noof' in request.GET and request.GET['noof'] != "":
            noof = request.GET['noof']
        else:
            noof = 25
        allcalls = Tblcalls.objects.raw("select * from tblcalls where Calls_StaffID = "+str(request.session['Staff'].staff_pk)+" and (Calls_Deleted!=1)  limit "+str(noof)+"")
        calltypes = Tblcalltype.objects.all()
        callactions = Tblcallaction.objects.all()
        callsource = Tblcallsource.objects.all()
        logging.info('total contacts:: %s',request.session['Staff'].staff_pk)
        content = {'page_title': "View Calls",
                   'allitems':allcalls,"calltypes":calltypes,"callactions":callactions,
                   "callsource":callsource,}
        return render_template(request, "viewcalls.htm", content)
Example #27
  def __init__(self):
    """Initializes the manager by reading the config file."""

    self.routers = []
    self.auth_manager = auth_manager.AuthorizationManager()

    self.default_router = self._CreateRouter(config.CONFIG["API.DefaultRouter"])

    if config.CONFIG["API.RouterACLConfigFile"]:
      logging.info("Using API router ACL config file: %s",
                   config.CONFIG["API.RouterACLConfigFile"])

      with open(config.CONFIG["API.RouterACLConfigFile"], mode="rb") as fh:
        acl_list = APIAuthorization.ParseYAMLAuthorizationsList(fh.read())

      if not acl_list:
        raise InvalidAPIAuthorization("No entries added from "
                                      "RouterACLConfigFile.")

      for index, acl in enumerate(acl_list):
        router = self._CreateRouter(acl.router, params=acl.router_params)
        self.routers.append(router)

        router_id = str(index)
        self.auth_manager.DenyAll(router_id)

        for group in acl.groups:
          self.auth_manager.AuthorizeGroup(group, router_id)

        for user in acl.users:
          self.auth_manager.AuthorizeUser(user, router_id)
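Judging only by the attributes read above (acl.router, acl.router_params, acl.groups, acl.users), one entry in the RouterACLConfigFile YAML would look something like this (illustrative, not the authoritative GRR schema):

# router: "ApiCallRobotRouter"
# router_params:
#   some_param: "value"
# groups: ["admins"]
# users: ["alice", "bob"]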
Example #28
def analyze_testcases(test_results_section):
    case_names = {}
    result_types = []
    result_fields = {}
    for test_result in test_results_section:
        case_name = test_result['case_name']
        if case_name in case_names:
            case_names[case_name] += 1
        else:
            case_names[case_name] = 1
            result_types.append(test_result)

    for test_result in result_types:
        for field in test_result.iterkeys():
            if field in result_fields:
                result_fields[field] += 1
            else:
                result_fields[field] = 1

    logging.info("Number of different case names: {}\n".format(len(case_names)))
    for case_name, occurrences in case_names.iteritems():
        logging.info("Case name '{}' occurred {} times".format(case_name, occurrences))
    logging.info('')
    for field, occurrences in result_fields.iteritems():
        logging.info("Field '{}' occurred {} times".format(field, occurrences))
    logging.info('')
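A quick illustration (Python 2, matching the iterkeys/iteritems calls above):

test_results = [
    {'case_name': 'login', 'duration': 1.2},
    {'case_name': 'login', 'duration': 0.9},
    {'case_name': 'logout', 'result': 'pass'},
]
analyze_testcases(test_results)
# Number of different case names: 2
# Case name 'login' occurred 2 times
# Case name 'logout' occurred 1 times
# Field 'case_name' occurred 2 times   (fields are counted once per distinct case)
# Field 'duration' occurred 1 times
# Field 'result' occurred 1 times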
Example #29
    def makeJar(self, infile, jardir):
        '''makeJar is the main entry point to JarMaker.

        It takes the input file, the output directory, the source dirs and the
        top source dir as argument, and optionally the l10n dirs.
        '''

        # making paths absolute, guess srcdir if file and add to sourcedirs
        _normpath = lambda p: os.path.normpath(os.path.abspath(p))
        self.topsourcedir = _normpath(self.topsourcedir)
        self.sourcedirs = [_normpath(p) for p in self.sourcedirs]
        if self.localedirs:
            self.localedirs = [_normpath(p) for p in self.localedirs]
        elif self.relativesrcdir:
            self.localedirs = \
                self.generateLocaleDirs(self.relativesrcdir)
        if isinstance(infile, basestring):
            logging.info('processing ' + infile)
            self.sourcedirs.append(_normpath(os.path.dirname(infile)))
        pp = self.pp.clone()
        pp.out = JarManifestParser()
        pp.do_include(infile)

        for info in pp.out:
            self.processJarSection(info, jardir)
Example #30
def parse_files(filelist, doctype='grant', commit_frequency=0):
    """
    Takes in a list of patent file names (from __main__() and start.py) and commits
    them to the database. This method is designed to be used sequentially to
    account for db concurrency.  The optional argument `commit_frequency`
    determines the frequency with which we commit the objects to the database.
    If set to 0, it will commit after all patobjects have been added.  Setting
    `commit_frequency` to be low (but not 0) is helpful for low memory machines.
    """
    if not filelist:
        return
    commit = alchemy.commit
    for filename in filelist:
        print filename
        for i, xmltuple in enumerate(extract_xml_strings(filename)):
            patobj = parse_patent(xmltuple, filename, doctype)
            if doctype == 'grant':
                alchemy.add_grant(patobj)
                commit = alchemy.commit
            else:
                alchemy.add_application(patobj)
                commit = alchemy.commit_application
            if commit_frequency and ((i+1) % commit_frequency == 0):
                commit()
                logging.info("{0} - {1} - {2}".format(filename, (i+1), datetime.datetime.now()))
                print " *", (i+1), datetime.datetime.now()
            
        commit()
        print " *", "Complete", datetime.datetime.now()
Example #31
 def getNumPenceTweets(self):
     logging.info('getting number of pence tweets...')
     new_tweets_total = self.getNumTweetsGeneric('https://twitter.com/vp')
     return new_tweets_total
Example #32
def run(test, params, env):
    """
    Test hot unplug virtio serial devices.

     1) Start guest with virtio serial device(s).
     2) Run serial data transfer in background
     3) Load module in guest os(linux only).
     4) For each of the virtio serial ports, do following steps one by one:
     4.1) Unload module in guest(linux only)
     4.2) Hot-unplug the virtio serial port
     4.3) Hotplug the devices
     4.4) Reload module in the guest(linux only)
     5) Repeat steps 2, 3 and 4 100 times
     6) Run serial data transfer after repeated unplug/plug
     7) Reboot VM to make sure the guest kernel not panic.

    :param test:   QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env:    Dictionary with test environment.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    os_type = params["os_type"]
    timeout = int(params.get("login_timeout", 360))
    module = params.get("modprobe_module")
    check_module = params.get_boolean("check_module", True)
    bg_test = params.get_boolean("bg_test", True)
    host_script = params["host_script"]
    check_pid_command = "pgrep -f %s" % host_script
    orig_set = set(process.getoutput(check_pid_command).splitlines())
    session = vm.wait_for_login()
    if os_type == "windows":
        driver_name = params["driver_name"]
        session = utils_test.qemu.windrv_check_running_verifier(
            session, vm, test, driver_name)
    if module and check_module:
        error_context.context("Load module %s" % module, logging.info)
        session.cmd("modprobe %s" % module)
        time.sleep(1)
    session.close()

    for port in params.objects("serials"):
        session = vm.wait_for_login(timeout=timeout)
        port_params = params.object_params(port)
        if not port_params['serial_type'].startswith('virt'):
            continue
        virtio_port = vm.devices.get(port)
        if not virtio_port:
            test.fail("Virtio Port '%s' not found" % port)
        chardev_qid = virtio_port.get_param("chardev")
        try:
            port_chardev = vm.devices.get_by_qid(chardev_qid)[0]
        except IndexError:
            test.error("Failed to get device %s" % chardev_qid)
        if port_params['serial_type'] == 'virtserialport':
            params['file_transfer_serial_port'] = port
            if bg_test:
                run_bg_test(test, params, vm)
        for repeat in range(params.get_numeric("repeat_times", 1)):
            repeat += 1
            if module and check_module:
                error_context.context("Unload module %s" % module,
                                      logging.info)
                session.cmd("modprobe -r %s" % module)
                time.sleep(1)
            error_context.context(
                "Unplug virtio port '%s' in %d tune(s)" % (port, repeat),
                logging.info)
            vm.devices.simple_unplug(virtio_port, vm.monitor)
            if port_params.get("unplug_chardev") == "yes":
                error_context.context(
                    "Unplug chardev '%s' for virtio port '%s'" %
                    (port, chardev_qid), logging.info)
                vm.devices.simple_unplug(port_chardev, vm.monitor)
                time.sleep(0.5)
                vm.devices.simple_hotplug(port_chardev, vm.monitor)
            vm.devices.simple_hotplug(virtio_port, vm.monitor)
            if module and check_module:
                error_context.context("Load  module %s" % module, logging.info)
                session.cmd("modprobe %s" % module)
                time.sleep(1)
        session.close()
        test_set = set(process.getoutput(check_pid_command).splitlines())
        difference = test_set.difference(orig_set)
        if difference:
            logging.info("Kill the first serial process on host")
            result = process.system('kill -9 %s' % difference.pop(),
                                    shell=True)
            if result != 0:
                logging.error(
                    "Failed to kill the first serial process on host!")
        if transfer_data(params, vm) is not True:
            test.fail("Serial data transfter test failed.")
    vm.reboot()
    vm.verify_kernel_crash()
    session = vm.wait_for_login(timeout=timeout)
    session.close()
Example #33
def init(args):
    encoding = args.get('--encoding')
    extra_ignore_dirs = args.get('--ignore')
    follow_links = not args.get('--no-follow-links')
    input_path = args['<path>']
    if input_path is None:
        input_path = os.path.abspath(os.curdir)

    if extra_ignore_dirs:
        extra_ignore_dirs = extra_ignore_dirs.split(',')

    candidates = get_all_imports(input_path,
                                 encoding=encoding,
                                 extra_ignore_dirs=extra_ignore_dirs,
                                 follow_links=follow_links)
    candidates = get_pkg_names(candidates)
    logging.debug("Found imports: " + ", ".join(candidates))
    pypi_server = "https://pypi.python.org/pypi/"
    proxy = None
    if args["--pypi-server"]:
        pypi_server = args["--pypi-server"]

    if args["--proxy"]:
        proxy = {'http': args["--proxy"], 'https': args["--proxy"]}

    if args["--use-local"]:
        logging.debug(
            "Getting package information ONLY from local installation.")
        imports = get_import_local(candidates, encoding=encoding)
    else:
        logging.debug("Getting packages information from Local/PyPI")
        local = get_import_local(candidates, encoding=encoding)
        # Get packages that were not found locally
        difference = [x for x in candidates
                      if x.lower() not in [z['name'].lower() for z in local]]
        imports = local + get_imports_info(difference,
                                           proxy=proxy,
                                           pypi_server=pypi_server)

    path = (args["--savepath"] if args["--savepath"] else
            os.path.join(input_path, "requirements.txt"))

    if args["--diff"]:
        diff(args["--diff"], imports)
        return

    if args["--clean"]:
        clean(args["--clean"], imports)
        return

    if (not args["--print"]
            and not args["--savepath"]
            and not args["--force"]
            and os.path.exists(path)):
        logging.warning("Requirements.txt already exists, "
                        "use --force to overwrite it")
        return

    if args.get('--no-pin'):
        imports = [{'name': item["name"], 'version': ''} for item in imports]

    if args["--print"]:
        output_requirements(imports)
        logging.info("Successfully output requirements")
    else:
        generate_requirements_file(path, imports)
        logging.info("Successfully saved requirements file in " + path)
Example #34
             if v not in d:
                 print('%s is not a key in map file %s' % (v, sMapFile))
             dMapper[k] = d[v] if v in d else dMapper[k]
     return dMapper,dMasterPtrn

def ArgParser():
    parser = argparse.ArgumentParser(description='This code for converting ctm alignment file to praat TextGrid file', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('ctm_dir',  help='The path to the directory contains ctm files', type=str)
    parser.add_argument('phones',  help='The path to the phones file that mapping between int to symbol', type=str)
    parser.add_argument('out_dir', help='The path to store TextGrid files', type=str)
    parser.add_argument('-l', '--lexicon', help='The lexicon file mapping between words and phoneme sequences', dest='lexicon', type=str, default='')
    parser.add_argument('-m', '--phMap', help='Mapping phoneme symbols to other symbols (i.e samba to ipa)', dest='phMap', type=str, default='')
    return parser.parse_args()


if __name__ == '__main__':
    args = ArgParser()
    sCtmDir, sPhonesFile, sOutDir = args.ctm_dir, args.phones, args.out_dir
    sPhoneMapFile = args.phMap
    #Regular expression to remove the position suffix from the key of dictionary
    ptrn_pos = re.compile('_[BIES]')
    dMapper,rPtrn = prepare_mapper(((sPhonesFile,None),(sPhoneMapFile,ptrn_pos)))
    log.info("Mapper have been created from %s" % ' '.join((sPhonesFile,sPhoneMapFile)))
    lCtmFiles = glob.glob(join(sCtmDir,'*.ctm'))
    log.info("%d cmt files found in %s" % (len(lCtmFiles),sCtmDir))
    dAlign = defaultdict(list)
    for fCtm in lCtmFiles:
        log.debug("Now processing %s" % fCtm)
        loadCMT(fCtm,dAlign)
    Generate_TxtGrid(sOutDir, dAlign, dMapper=dMapper, cSpkrIdDil='-', sSuffix = '',bWordTier = False)
Example #35
def showme(data):
    for line in data:
        logging.info("  {0}".format(line))
Example #36
def download_and_extract_repo(external_repo_name: str,
                              external_repo_dir: str = None) -> None:
    """
    Downloads and extracts an external repository for use within ARMORY. The external
    repository's project root will be added to the sys path.

    Private repositories require an `ARMORY_GITHUB_TOKEN` environment variable.

    :param external_repo_name: String name of "organization/repo-name" or "organization/repo-name@branch"
    :param external_repo_dir: Optional directory into which the repository is extracted.
    """
    verify_ssl = get_verify_ssl()

    if external_repo_dir is None:
        external_repo_dir = paths.runtime_paths().external_repo_dir

    os.makedirs(external_repo_dir, exist_ok=True)
    headers = {}

    if "@" in external_repo_name:
        org_repo_name, branch = external_repo_name.split("@")
    else:
        org_repo_name = external_repo_name
        branch = "master"
    repo_name = org_repo_name.split("/")[-1]

    if "ARMORY_GITHUB_TOKEN" in os.environ and os.getenv(
            "ARMORY_GITHUB_TOKEN") != "":
        headers = {
            "Authorization": f'token {os.getenv("ARMORY_GITHUB_TOKEN")}'
        }

    response = requests.get(
        f"https://api.github.com/repos/{org_repo_name}/tarball/{branch}",
        headers=headers,
        stream=True,
        verify=verify_ssl,
    )

    if response.status_code == 200:
        logging.info(f"Downloading external repo: {external_repo_name}")

        tar_filename = os.path.join(external_repo_dir, repo_name + ".tar.gz")
        with open(tar_filename, "wb") as f:
            f.write(response.raw.read())
        tar = tarfile.open(tar_filename, "r:gz")
        dl_directory_name = tar.getnames()[0]
        tar.extractall(path=external_repo_dir)

        # Always overwrite existing repositories to keep them at HEAD
        final_dir_name = os.path.join(external_repo_dir, repo_name)
        if os.path.isdir(final_dir_name):
            shutil.rmtree(final_dir_name)
        os.rename(
            os.path.join(external_repo_dir, dl_directory_name),
            final_dir_name,
        )
        add_path(final_dir_name, include_parent=True)

    else:
        raise ConnectionError(
            "Unable to download repository. If it's private make sure "
            "`ARMORY_GITHUB_TOKEN` environment variable is set\n"
            f"status_code is {response.status_code}\n"
            f"full response is {response.text}")
Example #37
 def getNumTrumpTweets(self):
     logging.info('getting number of trump tweets...')
     new_tweets_total = self.getNumTweetsGeneric(
         'https://twitter.com/realDonaldTrump')
     return new_tweets_total
Example #38
 def getNumPotusTweets(self):
     logging.info('getting number of potus tweets...')
     new_tweets_total = self.getNumTweetsGeneric(
         'https://twitter.com/potus')
     return new_tweets_total
Example #39
def tyrell():
    count = redis.incr('hit-tyrell')
    logging.info('Somebody liked the Tyrells, counter ' + str(count))
    return render_template('index.html', tyrellcounter=count)
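tyrell and the other house counters in this listing read like Flask view functions; the route registrations are not shown, but presumably each is decorated along these lines (hypothetical):

# @app.route('/tyrell')
# def tyrell(): ...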
Example #40
 def getNumWhitehouseTweets(self):
     logging.info('getting number of whitehouse tweets...')
     new_tweets_total = self.getNumTweetsGeneric(
         'https://twitter.com/whitehouse')
     return new_tweets_total
Example #41
def targaryan():
    count = redis.incr('hit-targaryan')
    logging.info('Somebody liked the Targaryans, counter ' + str(count))
    return render_template('index.html', targaryancounter=count)
Example #42
 def getNumAlexTestTweets(self):
     logging.info('getting number of test tweets...')
     new_tweets_total = self.getNumTweetsGeneric(
         'https://twitter.com/Alexdai82109299')
     return new_tweets_total
Example #43
def greyjoy():
    count = redis.incr('hit-greyjoy')
    logging.info('Somebody liked the Greyjoys, counter ' + str(count))
    return render_template('index.html', greyjoycounter=count)
Example #44
def tully():
    count = redis.incr('hit-tully')
    logging.info('Somebody liked the Tullys, counter ' + str(count))
    return render_template('index.html', tullycounter=count)
Example #45
def barratheon():
    count = redis.incr('hit-barratheon')
    logging.info('Somebody liked the Barratheons, counter ' + str(count))
    return render_template('index.html', barratheoncounter=count)
Example #46
def lannister():
    count = redis.incr('hit-lannister')
    logging.info('Somebody liked the Lannisters, counter ' + str(count))
    return render_template('index.html', lannistercounter=count)
Example #47
def arryn():
    count = redis.incr('hit-arryn')
    logging.info('Somebody liked the arryns, counter ' + str(count))
    return render_template('index.html', arryncounter=count)
Example #48
def dorne():
    count = redis.incr('hit-dorne')
    logging.info('Somebody liked the Dornes, counter ' + str(count))
    return render_template('index.html', dornecounter=count)
Example #49
    def copy(self,
             other,
             parameters=None,
             parameter_names=None,
             posterior_only=False,
             **kwargs):
        """Copies data in this file to another file.

        The samples and stats to copy may be down selected using the given
        kwargs. All other data (the "metadata") are copied exactly.

        Parameters
        ----------
        other : str or InferenceFile
            The file to write to. May be either a string giving a filename,
            or an open hdf file. If the former, the file will be opened with
            the write attribute (note that if a file already exists with that
            name, it will be deleted).
        parameters : list of str, optional
            List of parameters to copy. If None, will copy all parameters.
        parameter_names : dict, optional
            Rename one or more parameters to the given name. The dictionary
            should map parameter -> parameter name. If None, will just use the
            original parameter names.
        posterior_only : bool, optional
            Write the samples and likelihood stats as flattened arrays, and
            set other's posterior_only attribute. For example, if this file
            has a parameter's samples written to
            `{samples_group}/{param}/walker{x}`, then other will have all of
            the selected samples from all walkers written to
            `{samples_group}/{param}/`.
        \**kwargs :
            All other keyword arguments are passed to `read_samples`.

        Returns
        -------
        InferenceFile
            The open file handler to other.
        """
        if not isinstance(other, h5py.File):
            # check that we're not trying to overwrite this file
            if other == self.name:
                raise IOError("destination is the same as this file")
            other = InferenceFile(other, 'w')
        # copy metadata over
        self.copy_metadata(other)
        # update other's posterior attribute
        if posterior_only:
            other.attrs['posterior_only'] = posterior_only
        # select the samples to copy
        logging.info("Reading samples to copy")
        if parameters is None:
            parameters = self.variable_args
        # if list of desired parameters is different, rename variable args
        if set(parameters) != set(self.variable_args):
            other.attrs['variable_args'] = parameters
        # if only the posterior is desired, we'll flatten the results
        if not posterior_only and not self.posterior_only:
            kwargs['flatten'] = False
        samples = self.read_samples(parameters, **kwargs)
        logging.info("Copying {} samples".format(samples.size))
        # if different parameter names are desired, get them from the samples
        if parameter_names:
            arrs = {pname: samples[p] for p, pname in parameter_names.items()}
            arrs.update({
                p: samples[p]
                for p in parameters if p not in parameter_names
            })
            samples = FieldArray.from_kwargs(**arrs)
            other.attrs['variable_args'] = samples.fieldnames
        logging.info("Writing samples")
        other.samples_parser.write_samples_group(other, self.samples_group,
                                                 samples.fieldnames, samples)
        # do the same for the likelihood stats
        logging.info("Reading stats to copy")
        stats = self.read_likelihood_stats(**kwargs)
        logging.info("Writing stats")
        other.samples_parser.write_samples_group(other, self.stats_group,
                                                 stats.fieldnames, stats)
        # if any down selection was done, re-set the burn in iterations and
        # the acl, and the niterations.
        # The last dimension of the samples returned by the sampler should
        # be the number of iterations.
        if samples.shape[-1] != self.niterations:
            other.attrs['acl'] = 1
            other.attrs['burn_in_iterations'] = 0
            other.attrs['niterations'] = samples.shape[-1]
        return other
Example #50
def stark():
    count = redis.incr('hit-stark')
    logging.info('Somebody liked the starks, counter ' + str(count))
    return render_template('index.html', starkcounter=count)
Example #51
def run() -> int:
	"""
	This function is the entrypoint into the script's main flow from :func:`traffic_ops_ort.doMain`
	It runs the appropriate actions depending on the run mode

	:returns: an exit code for the script
	"""
	from . import configuration, utils, services

	try:
		api = to_api.API(configuration.USERNAME, configuration.PASSWORD, configuration.TO_HOST,
		                 configuration.HOSTNAME[0], configuration.TO_PORT, configuration.VERIFY,
		                 configuration.TO_USE_SSL)
	except (LoginError, OperationError) as e:
		logging.critical("Failed to authenticate with Traffic Ops")
		logging.error(e)
		logging.debug("%r", e, exc_info=True, stack_info=True)
		return 1

	# If this is just a revalidation, then we can exit if there's no revalidation pending
	if configuration.MODE == configuration.Modes.REVALIDATE:
		try:
			updateRequired = revalidateState(api)
		except ORTException as e:
			logging.debug("%r", e, exc_info=True, stack_info=True)
			return 2

		if not updateRequired:
			logging.info("No revalidation pending")
			return 0

		logging.info("in REVALIDATE mode; skipping package/service processing")

	# In all other cases, we check for an update to the Delivery Service and apply any found
	# changes
	else:
		try:
			updateRequired = syncDSState(api)
		except ORTException as e:
			logging.debug("%r", e, exc_info=True, stack_info=True)
			return 2

		# Bail on failures - unless this script is BADASS!
		if not setStatusFile(api):
			if configuration.MODE is not configuration.Modes.BADASS:
				logging.critical("Failed to set status as specified by Traffic Ops")
				return 2
			logging.warning("Failed to set status but we're BADASS, so moving on.")

		logging.info("\nProcessing Packages...")
		if not processPackages(api):
			logging.critical("Failed to process packages")
			if configuration.MODE is not configuration.Modes.BADASS:
				return 2
			logging.warning("Package processing failed but we're BADASS, so attempting to move on")
		logging.info("Done.\n")

		logging.info("\nProcessing Services...")
		if not processServices(api):
			logging.critical("Failed to process services.")
			if configuration.MODE is not configuration.Modes.BADASS:
				return 2
			logging.warning("Service processing failed but we're BADASS, so attempting to move on")
		logging.info("Done.\n")


	# All modes process configuration files
	logging.info("\nProcessing Configuration Files...")
	if not processConfigurationFiles(api):
		logging.critical("Failed to process configuration files.")
		return 2
	logging.info("Done.\n")

	if updateRequired:
		if configuration.MODE is not configuration.Modes.INTERACTIVE or\
		   utils.getYesNoResponse("Update Traffic Ops?", default='Y'):

			logging.info("\nUpdating Traffic Ops...")
			api.updateTrafficOps()
			logging.info("Done.\n")
		else:
			logging.warning("Traffic Ops was not notified of changes. You should do this manually.")

		return 0

	logging.info("Traffic Ops update not necessary")

	if services.NEEDED_RELOADS and not services.doReloads():
		logging.critical("Failed to reload all configuration changes")
		return 2

	return 0
Example #52
    )
    depth_list.append(node_list[0].depth)

    tree_data.append(
        {
            'avg_entropy': probabilistic_average_entropy,
            'no_of_nodes': len(node_list)
        }
    )
    if len(children) != 0:
        bfs(children)


if __name__ == "__main__":

    init_logging()
    trsl_instance, clusters, ngram = args_parser()
    tree_data = []
    logging.info("Loading Complete")
    leaf_nodes = []
    sets = []
    sets_count = [0 for x in range(0, clusters)]
    xi = [0 for x in range(0, ngram)]
    length_fragment_row_indices_list = []
    depth_list = []

    if trsl_instance is None:
        logging.error("Error, trsl not trained from precomputed data")
    else:
        plot_graphs()
Example #53
 async def on_ready(self):
     logging.info(f'Logged in as {self.user} (ID: {self.user.id})')
Example #54
import app
import logging
app.init_logging()
logging.info('TEST whether the logger works properly')
Example #55
def run(test, params, env):
    """
    Test qmp event notification, this case will:
    1) Start VM with qmp enable.
    2) Connect to qmp port then run qmp_capabilities command.
    3) Initiate the qmp command defined in config (qmp_cmd)
    4) Verify that qmp command works as designed.

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env:    Dictionary with test environment.
    """
    def check_result(qmp_o, output=None, exception_list=""):
        """
        Check the test result in different ways according to
        result_check.
        result_check = equal, will compare cmd_return_value with qmp
                       command output.
        result_check = contain, will try to find cmd_return_value in qmp
                       command output.
        result_check = m_equal_q, will compare key value in monitor command
                       output and qmp command output.
        result_check = m_in_q, will try to find monitor command output's key
                       value in qmp command output.
        result_check = m_format_q, will try to match the output's format with
                       check pattern.

        :param qmp_o: output from pre_cmd, qmp_cmd or post_cmd.
        :param output: output from pre_cmd, qmp_cmd or post_cmd, or an expected
                       result set in the config file.
        :param exception_list: elements that need no check.
        """
        if result_check == "equal":
            value = output
            if value != str(qmp_o):
                raise exceptions.TestFail("QMP command return value does not match "
                                          "the expect result. Expect result: '%s'\n"
                                          "Actual result: '%s'" % (value, qmp_o))
        elif result_check == "contain":
            values = output.split(';')
            for value in values:
                if value in exception_list:
                    continue
                if value.strip() not in str(qmp_o):
                    raise exceptions.TestFail("QMP command output does not contain "
                                              "expect result. Expect result: '%s'\n"
                                              "Actual result: '%s'"
                                              % (value, qmp_o))
        elif result_check == "not_contain":
            values = output.split(';')
            for value in values:
                if value in exception_list:
                    continue
                if value in str(qmp_o):
                    raise exceptions.TestFail("QMP command output contains unexpect"
                                              " result. Unexpect result: '%s'\n"
                                              "Actual result: '%s'"
                                              % (value, qmp_o))
        elif result_check == "m_equal_q":
            msg = "QMP command ouput is not equal to in human monitor command."
            msg += "\nQMP command output: '%s'" % qmp_o
            msg += "\nHuman command output: '%s'" % output
            res = output.splitlines(True)
            if type(qmp_o) != type(res):
                len_o = 1
            else:
                len_o = len(qmp_o)
            if len(res) != len_o:
                if res[0].startswith(' '):
                    raise exceptions.TestFail("Human command starts with ' ', "
                                              "there is probably some garbage in "
                                              "the output.\n" + msg)
                res_tmp = []
                # (qemu) info block in RHEL7 is divided into 3 lines
                for line in res:
                    if not line.startswith(' '):
                        res_tmp.append(line)
                    else:
                        res_tmp[-1] += line
                res = res_tmp
                if len(res) != len_o:
                    raise exceptions.TestFail(msg)
            re_str = r'([^ \t\n\r\f\v=]*)=([^ \t\n\r\f\v=]*)'
            for i in range(len(res)):
                if qmp_cmd == "query-version":
                    version = qmp_o['qemu']
                    version = "%s.%s.%s" % (version['major'], version['minor'],
                                            version['micro'])
                    package = qmp_o['package']
                    re_str = r"([0-9]+\.[0-9]+\.[0-9]+)\s*(\(\S*\))?"
                    hmp_version, hmp_package = re.findall(re_str, res[i])[0]
                    if not hmp_package:
                        hmp_package = package
                    hmp_package = hmp_package.strip()
                    package = package.strip()
                    hmp_version = hmp_version.strip()
                    if version != hmp_version or package != hmp_package:
                        raise exceptions.TestFail(msg)
                else:
                    matches = re.findall(re_str, res[i])
                    for key, val in matches:
                        if key in exception_list:
                            continue
                        if '0x' in val:
                            val = long(val, 16)
                            val_str = str(bin(val))
                            com_str = ""
                            for p in range(3, len(val_str)):
                                if val_str[p] == '1':
                                    com_str += '0'
                                else:
                                    com_str += '1'
                            com_str = "0b" + com_str
                            value = int(com_str, 2) + 1
                            if val_str[2] == '1':
                                value = -value
                            if value != qmp_o[i][key]:
                                msg += "\nValue in human monitor: '%s'" % value
                                msg += "\nValue in qmp: '%s'" % qmp_o[i][key]
                                raise exceptions.TestFail(msg)
                        elif qmp_cmd == "query-block":
                            cmp_str = "u'%s': u'%s'" % (key, val)
                            cmp_s = "u'%s': %s" % (key, val)
                            if '0' == val:
                                cmp_str_b = "u'%s': False" % key
                            elif '1' == val:
                                cmp_str_b = "u'%s': True" % key
                            else:
                                cmp_str_b = cmp_str
                            if (cmp_str not in str(qmp_o[i]) and
                                    cmp_str_b not in str(qmp_o[i]) and
                                    cmp_s not in str(qmp_o[i])):
                                msg += ("\nCan not find '%s', '%s' or '%s' in "
                                        " QMP command output."
                                        % (cmp_s, cmp_str_b, cmp_str))
                                raise exceptions.TestFail(msg)
                        elif qmp_cmd == "query-balloon":
                            if (int(val) * 1024 * 1024 != qmp_o[key] and
                                    val not in str(qmp_o[key])):
                                msg += ("\n'%s' is not in QMP command output"
                                        % val)
                                raise exceptions.TestFail(msg)
                        else:
                            if (val not in str(qmp_o[i][key]) and
                                    str(bool(int(val))) not in str(qmp_o[i][key])):
                                msg += ("\n'%s' is not in QMP command output"
                                        % val)
                                raise exceptions.TestFail(msg)
        elif result_check == "m_in_q":
            res = output.splitlines(True)
            msg = "Key value from human monitor command is not in"
            msg += "QMP command output.\nQMP command output: '%s'" % qmp_o
            msg += "\nHuman monitor command output '%s'" % output
            for i in range(len(res)):
                params = res[i].rstrip().split()
                for param in params:
                    if param.rstrip() in exception_list:
                        continue
                    try:
                        str_o = str(qmp_o.values())
                    except AttributeError:
                        str_o = qmp_o
                    if param.rstrip() not in str(str_o):
                        msg += "\nKey value is '%s'" % param.rstrip()
                        raise exceptions.TestFail(msg)
        elif result_check == "m_format_q":
            match_flag = True
            for i in qmp_o:
                if output is None:
                    raise exceptions.TestError("QMP output pattern is missing")
                if re.match(output.strip(), str(i)) is None:
                    match_flag = False
            if not match_flag:
                msg = "Output does not match the pattern: '%s'" % output
                raise exceptions.TestFail(msg)

    qemu_binary = utils_misc.get_qemu_binary(params)
    if not utils_misc.qemu_has_option("qmp", qemu_binary):
        raise exceptions.TestSkipError("Host qemu does not support qmp.")

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360)))

    module = params.get("modprobe_module")
    if module:
        logging.info("modprobe the module %s", module)
        session.cmd("modprobe %s" % module)

    qmp_ports = vm.get_monitors_by_type('qmp')
    if qmp_ports:
        qmp_port = qmp_ports[0]
    else:
        raise exceptions.TestError("Incorrect configuration, no QMP monitor found.")
    hmp_ports = vm.get_monitors_by_type('human')
    if hmp_ports:
        hmp_port = hmp_ports[0]
    else:
        raise exceptions.TestError("Incorrect configuration, no QMP monitor found.")
    callback = {"host_cmd": lambda cmd: process.system_output(cmd, shell=True),
                "guest_cmd": session.get_command_output,
                "monitor_cmd": hmp_port.send_args_cmd,
                "qmp_cmd": qmp_port.send_args_cmd}

    def send_cmd(cmd):
        """Helper to execute a command on the host, in the guest or on a monitor."""
        if cmd_type in callback:
            return callback[cmd_type](cmd)
        else:
            raise exceptions.TestError("cmd_type '%s' is not supported"
                                       % cmd_type)

    pre_cmd = params.get("pre_cmd")
    qmp_cmd = params.get("qmp_cmd")
    cmd_type = params.get("event_cmd_type")
    post_cmd = params.get("post_cmd")
    result_check = params.get("cmd_result_check")
    cmd_return_value = params.get("cmd_return_value")
    exception_list = params.get("exception_list", "")

    # Pre command
    if pre_cmd is not None:
        logging.info("Run prepare command '%s'.", pre_cmd)
        pre_o = send_cmd(pre_cmd)
        logging.debug("Pre-command: '%s'\n Output: '%s'", pre_cmd, pre_o)
    try:
        # Testing command
        logging.info("Run qmp command '%s'.", qmp_cmd)
        output = qmp_port.send_args_cmd(qmp_cmd)
        logging.debug("QMP command: '%s' \n Output: '%s'", qmp_cmd, output)
    except qemu_monitor.QMPCmdError, err:
        if params.get("negative_test") == 'yes':
            logging.debug("Negative QMP command: '%s'\n output:'%s'", qmp_cmd,
                          err)
            if params.get("negative_check_pattern"):
                check_pattern = params.get("negative_check_pattern")
                if check_pattern not in str(err):
                    raise exceptions.TestFail("'%s' not in exception '%s'"
                                              % (check_pattern, err))
        else:
            raise exceptions.TestFail(err)
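
The test above is configured entirely through the avocado-vt params object. A minimal sketch of the keys it consults; the key names are the ones read in the code, while every value here is illustrative only:

    # Illustrative Cartesian-config values for the QMP test above.
    # Key names match what the code reads; the values are made up.
    params = {
        "main_vm": "avocado-vt-vm1",
        "qmp_cmd": "query-status",
        "event_cmd_type": "qmp_cmd",     # host_cmd / guest_cmd / monitor_cmd / qmp_cmd
        "cmd_result_check": "contain",   # equal / contain / not_contain / m_equal_q / ...
        "cmd_return_value": "running",
        "exception_list": "",
    }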
Example n. 56
def setStatusFile(api:to_api.API) -> bool:
	"""
	Attempts to set the status file according to this server's reported status in Traffic Ops.

	.. warning:: This will create the directory '/opt/ORTstatus' if it does not exist, and may
		delete files there without warning!

	:param api: A :class:`traffic_ops_ort.to_api.API` object to use when interacting with Traffic Ops
	:returns: whether or not the status file could be set properly
	"""
	global STATUS_FILE_DIR
	from .configuration import MODE, Modes
	from . import utils
	logging.info("Setting status file")

	if not isinstance(MODE, Modes):
		logging.error("MODE is not set to a valid Mode (from traffic_ops_ort.configuration.Modes)!")
		return False

	try:
		myStatus = api.getMyStatus()
	except ConnectionError as e:
		logging.error("Failed to set status file - Traffic Ops connection failed")
		logging.debug("%s", e, exc_info=True, stack_info=True)
		return False

	if not os.path.isdir(STATUS_FILE_DIR):
		logging.warning("status directory does not exist, creating...")
		doMakeDir = MODE is not Modes.REPORT

		# Check for user confirmation if in 'INTERACTIVE' mode
		if doMakeDir and (MODE is not Modes.INTERACTIVE or\
		   utils.getYesNoResponse("Create status directory '%s'?" % STATUS_FILE_DIR, default='Y')):
			try:
				os.makedirs(STATUS_FILE_DIR)
			except OSError as e:
				logging.error("Failed to create status directory '%s' - %s", STATUS_FILE_DIR, e)
				logging.debug("%s", e, exc_info=True, stack_info=True)
				return False
	else:
		try:
			deleteOldStatusFiles(myStatus, api)
		except ConnectionError as e:
			logging.error("Failed to delete old status files - Traffic Ops connection failed.")
			logging.debug("%s", e, exc_info=True, stack_info=True)
			return False
		except OSError as e:
			logging.error("Failed to delete old status files - %s", e)
			logging.debug("%s", e, exc_info=True, stack_info=True)
			return False

	fname = os.path.join(STATUS_FILE_DIR, myStatus)
	if not os.path.isfile(fname):
		logging.info("File '%s' to be created", fname)
		if MODE is not Modes.REPORT and\
		  (MODE is not Modes.INTERACTIVE or utils.getYesNoResponse("Create file '%s'?" % fname, default='y')):

			try:
				with open(fname, 'x'):
					pass
			except OSError as e:
				logging.error("Failed to create status file - %s", e)
				logging.debug("%s", e, exc_info=True, stack_info=True)
				return False

	return True
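
A minimal sketch of how this helper might be called; it assumes the surrounding traffic_ops_ort package and that configuration has already been initialized. The API construction shown is hypothetical, since the real constructor arguments are not part of this snippet:

	import sys
	from traffic_ops_ort import to_api

	api = to_api.API()  # hypothetical construction; may require host/credentials
	if not setStatusFile(api):
		logging.critical("Could not set status file")
		sys.exit(1)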
Example n. 57
    def __dispatchTask(self, target):
        # '/niub/www/sourcecode/ResearchReporterTool-2.4-ssd-123/image_ids_isolate'+ str(target)
        logging.info('target:' + str(target))
        image_ids = None
        if target == '0':
            logging.info('read file')
            image_ids = open('image_ids', 'r')
            ids_lines = image_ids.readlines()

        else:
            logging.info('parameter:' + target)
            ids_lines = target.split(',')

        for line in ids_lines:
            id = line.strip('\r\n')

            logging.info('Start selecting data')

            logging.info('the id is: %s', id)
            chart_items = self.collection.find({'_id': id})

            #chart_items = self.collection.find({'text_info': {'$exists': 1}})

            # print chart_items
            chart_items = self.convertRecordsToArray(chart_items)
            length = len(chart_items)
            logging.info('selected %d records', length)
            logging.info('Finished selecting data')
            if length == 0:
                time.sleep(self.__sleep_time_per_request_none)
            else:
                self.thread_manager.run(chart_items,
                                        self.__updateThreadFunc,
                                        target=target)
                time.sleep(self.__sleep_time_per_request)
        if image_ids is not None:
            image_ids.close()
        logging.info('Done')
Example n. 58
            loss_total_finetune.update(__loss_finetune[0], len(__y_true_finetune[0]))
            loss_xy_finetune.update(__loss_finetune[1], len(__y_true_finetune[0]))
            loss_wh_finetune.update(__loss_finetune[2], len(__y_true_finetune[0]))
            loss_conf_finetune.update(__loss_finetune[3], len(__y_true_finetune[0]))
            loss_class_finetune.update(__loss_finetune[4], len(__y_true_finetune[0]))

            if __global_step_finetune % args.train_evaluation_step == 0 and __global_step_finetune > 0:
                # recall, precision = evaluate_on_cpu(__y_pred, __y_true, args.class_num, args.nms_topk, args.score_threshold, args.eval_threshold)
                recall_finetune, precision_finetune = evaluate_on_gpu(sess, gpu_nms_op, pred_boxes_flag, pred_scores_flag, __y_pred_finetune,
                                                    __y_true_finetune, args.class_num, args.eval_threshold)

                info_finetune = "Epoch: {}, global_step: {} | loss: total: {:.2f}, xy: {:.2f}, wh: {:.2f}, conf: {:.2f}, class: {:.2f} | ".format(
                    epoch, int(__global_step_finetune), loss_total_finetune.avg, loss_xy_finetune.avg, loss_wh_finetune.avg, loss_conf_finetune.avg, loss_class_finetune.avg)
                info_finetune += 'Last batch: rec: {:.3f}, prec: {:.3f} | lr: {:.5g}'.format(recall_finetune, precision_finetune, __lr_finetune)
                print(info_finetune)
                logging.info(info_finetune)

                writer_fintune.add_summary(make_summary('evaluation/train_batch_recall', recall_finetune), global_step=__global_step_finetune)
                writer_fintune.add_summary(make_summary('evaluation/train_batch_precision', precision_finetune),
                                           global_step=__global_step_finetune)

                if np.isnan(loss_total_finetune.last_avg):
                    print('****' * 10)
                    raise ArithmeticError(
                        'Gradient exploded! Please train again and you may need modify some parameters.')

        if epoch % args.val_evaluation_epoch == 0 and epoch > 0:
            sess.run(val_init_op)

            val_loss_total_finetune, val_loss_xy_finetune, val_loss_wh_finetune, val_loss_conf_finetune, val_loss_class_finetune = \
                AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter()
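
The fragment above leans on an AverageMeter helper that is not shown. Below is a minimal sketch consistent with the interface used here (update(val, n), .avg, .last_avg); the exact semantics of last_avg in the original are an assumption:

    class AverageMeter(object):
        """Tracks a sample-weighted running mean of a scalar (e.g. a loss term)."""

        def __init__(self):
            self.sum = 0.0
            self.count = 0
            self.avg = 0.0
            self.last_avg = 0.0  # assumed: simply mirrors the latest avg

        def update(self, val, n=1):
            # 'val' is a batch mean; 'n' is the number of samples it covers.
            self.sum += float(val) * n
            self.count += n
            self.avg = self.sum / max(self.count, 1)
            self.last_avg = self.avg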
Example n. 59
    def load_model(
        self,
        model,
        device,
        input_size,
        output_size,
        num_requests,
        cpu_extension=None,
        plugin=None,
    ):
        """
         Loads a network and an image to the Inference Engine plugin.
        :param model: .xml file of pre trained model
        :param cpu_extension: extension for the CPU device
        :param device: Target device
        :param input_size: Number of input layers
        :param output_size: Number of output layers
        :param num_requests: Index of Infer request value. Limited to device capabilities.
        :param plugin: Plugin for specified device
        :return:  Shape of input layer
        """

        model_xml = model
        model_bin = os.path.splitext(model_xml)[0] + ".bin"
        # Plugin initialization for specified device
        # and load extensions library if specified
        if not plugin:
            log.info("Initializing plugin for {} device...".format(device))
            self.plugin = IEPlugin(device=device)
        else:
            self.plugin = plugin

        if cpu_extension and "CPU" in device:
            self.plugin.add_cpu_extension(cpu_extension)

        # Read IR
        log.info("Reading IR...")
        self.net = IENetwork(model=model_xml, weights=model_bin)
        log.info("Loading IR to the plugin...")

        if self.plugin.device == "CPU":
            supported_layers = self.plugin.get_supported_layers(self.net)
            not_supported_layers = [
                l for l in self.net.layers.keys() if l not in supported_layers
            ]
            if len(not_supported_layers) != 0:
                log.error(
                    "Following layers are not supported by "
                    "the plugin for specified device {}:\n {}".format(
                        self.plugin.device, ", ".join(not_supported_layers)
                    )
                )
                log.error(
                    "Please try to specify cpu extensions library path"
                    " in command line parameters using -l "
                    "or --cpu_extension command line argument"
                )
                sys.exit(1)

        if num_requests == 0:
            # Loads network read from IR to the plugin
            self.net_plugin = self.plugin.load(network=self.net)
        else:
            self.net_plugin = self.plugin.load(
                network=self.net, num_requests=num_requests
            )

        self.input_blob = next(iter(self.net.inputs))
        self.out_blob = next(iter(self.net.outputs))
        assert (
            len(self.net.inputs.keys()) == input_size
        ), "Supports only {} input topologies".format(input_size)
        assert (
            len(self.net.outputs) == output_size
        ), "Supports only {} output topologies".format(output_size)

        return self.plugin, self.get_input_shape()
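
A minimal sketch of driving load_model; the wrapper class name Network and the model path are hypothetical, and this targets the legacy IEPlugin API the snippet itself uses:

    net = Network()  # hypothetical wrapper class containing load_model
    plugin, input_shape = net.load_model(
        model="models/frozen_model.xml",  # assumed path to the IR .xml
        device="CPU",
        input_size=1,
        output_size=1,
        num_requests=2,
    )
    log.info("Network input shape: %s", input_shape)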
Example n. 60
                          err)
            if params.get("negative_check_pattern"):
                check_pattern = params.get("negative_check_pattern")
                if check_pattern not in str(err):
                    raise exceptions.TestFail("'%s' not in exception '%s'"
                                              % (check_pattern, err))
        else:
            raise exceptions.TestFail(err)
    except qemu_monitor.MonitorProtocolError, err:
        raise exceptions.TestFail(err)
    except Exception, err:
        raise exceptions.TestFail(err)

    # Post command
    if post_cmd is not None:
        logging.info("Run post command '%s'.", post_cmd)
        post_o = send_cmd(post_cmd)
        logging.debug("Post-command: '%s'\n Output: '%s'", post_cmd, post_o)

    if result_check is not None:
        txt = "Verify that qmp command '%s' works as designed." % qmp_cmd
        logging.info(txt)
        if result_check == "equal" or result_check == "contain":
            if qmp_cmd == "query-name":
                vm_name = params["main_vm"]
                check_result(output, vm_name, exception_list)
            elif qmp_cmd == "query-uuid":
                uuid_input = params["uuid"]
                check_result(output, uuid_input, exception_list)
            else:
                check_result(output, cmd_return_value, exception_list)