Example #1
 def get_info(self):
     logging.debug("Checking for StockPickr User %s", self.user)
     sql = "select uid from spr_user where name=%s;"
     self.id = self.db.query(sql, self.user, one=True)
     if not self.id:
         self.db.insert("spr_user", {"name": self.user})
         self.id = self.db.last_insert_id()
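The db wrapper used above is not shown; a minimal sketch of the interface the snippet appears to assume (method names and signatures inferred, using pymysql since the query uses MySQL-style %s placeholders):

import pymysql

class DB(object):
    def __init__(self, **conn_kwargs):
        self.conn = pymysql.connect(**conn_kwargs)

    def query(self, sql, *params, **kwargs):
        """Run a parameterized query; with one=True, return a single row/scalar."""
        one = kwargs.get("one", False)
        with self.conn.cursor() as cur:
            cur.execute(sql, params)
            row = cur.fetchone() if one else cur.fetchall()
        if one and row is not None and len(row) == 1:
            return row[0]  # unpack single-column rows to a scalar
        return row

    def insert(self, table, values):
        """Insert a dict of column -> value into the given table."""
        cols = ", ".join(values)
        placeholders = ", ".join(["%s"] * len(values))
        with self.conn.cursor() as cur:
            cur.execute("insert into %s (%s) values (%s)"
                        % (table, cols, placeholders), tuple(values.values()))
        self.conn.commit()

    def last_insert_id(self):
        with self.conn.cursor() as cur:
            cur.execute("select last_insert_id()")
            return cur.fetchone()[0]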
Example #2
    def extract(vm, remote_path, dest_dir):
        """
        Extract the autotest .tar.bz2 file on the guest, ensuring the final
        destination path will be dest_dir.

        :param vm: VM object
        :param remote_path: Remote file path
        :param dest_dir: Destination dir for the contents
        """
        basename = os.path.basename(remote_path)
        logging.debug("Extracting %s on VM %s", basename, vm.name)
        session.cmd("rm -rf %s" % dest_dir, timeout=240)
        dirname = os.path.dirname(remote_path)
        session.cmd("cd %s" % dirname)
        session.cmd("mkdir -p %s" % os.path.dirname(dest_dir))
        e_cmd = "tar xjvf %s -C %s" % (basename, os.path.dirname(dest_dir))
        output = session.cmd(e_cmd, timeout=240)
        autotest_dirname = ""
        for line in output.splitlines()[1:]:
            autotest_dirname = line.split("/")[0]
            break
        if autotest_dirname != os.path.basename(dest_dir):
            session.cmd("cd %s" % os.path.dirname(dest_dir))
            session.cmd("mv %s %s" %
                        (autotest_dirname, os.path.basename(dest_dir)))
Example #3
def tearDownModule():
  global vtgate_server
  logging.debug("in tearDownModule")
  if utils.options.skip_teardown:
    return
  logging.debug("Tearing down the servers and setup")
  utils.vtgate_kill(vtgate_server)
  tablet.kill_tablets([shard_0_master, shard_0_replica, shard_1_master,
                       shard_1_replica])
  teardown_procs = [shard_0_master.teardown_mysql(),
                    shard_0_replica.teardown_mysql(),
                    shard_1_master.teardown_mysql(),
                    shard_1_replica.teardown_mysql(),
                   ]
  utils.wait_procs(teardown_procs, raise_on_error=False)

  environment.topo_server_teardown()

  utils.kill_sub_processes()
  utils.remove_tmp_files()

  shard_0_master.remove_tree()
  shard_0_replica.remove_tree()
  shard_1_master.remove_tree()
  shard_1_replica.remove_tree()
Example #4
    def shell(self):
        """Performs the 'shell' command."""
        lib_jars = []
        if self.flags.jars is not None:
            lib_jars.extend(self.flags.jars.split(","))

        env = dict(self.env)

        express_cp = self.express.get_classpath(lib_jars=lib_jars)
        env["EXPRESS_CP"] = ":".join(express_cp)

        dist_cache_paths = self.express.list_paths_for_dist_cache(lib_jars=lib_jars)
        env["TMPJARS"] = ",".join(dist_cache_paths)

        # EXPRESS_MODE environment variable must be a path to the mode specific scala script:
        script_name = self._MODE_SCRIPT.get(self.flags.mode)
        assert (script_name is not None), ("Invalid Express mode: %r." % self.flags.mode)
        script_path = os.path.join(self.express.home_dir, "bin", script_name)
        assert os.path.isfile(script_path), ("Script not found: %r" % script_path)
        env["EXPRESS_MODE"] = script_path

        # express shell binary needs to be in the same directory as this script
        shell_path = os.path.join(self.express.home_dir, "bin", "express-shell")
        assert os.path.isfile(shell_path), ("Shell not found: %r" % shell_path)
        cmd = [shell_path] + list(self.args)
        logging.debug(
            "Launching the express shell using the command:\n%s",
            " \\\n\t".join(map(repr, cmd)))
        proc = subprocess.Popen(cmd, env=env)
        try:
            return proc.wait()
        except subprocess.SubprocessError:
            proc.kill()
Example #5
    def get(self, version_id):
        self.check_version_id(version_id)

        cache_key = version_id + str(self.__class__)
        page = self.cache.get(cache_key)

        if self.cache_bust or not page:
            logging.debug('Cache miss with key: %s' % cache_key)
            retrieved_data = EmailTemplate.fetch_all(self.data_sources[version_id])
            trail_blocks = deduplication.build_unique_trailblocks(retrieved_data,
                self.priority_list[version_id],
                excluded=self.exclude_from_deduplication())
            today = datetime.datetime.now()
            date = today.strftime('%A %d %b %Y')

            template_name = self.template_names[version_id] + '.html'
            template = self.resolve_template(template_name)

            ads = {}

            if hasattr(self, 'ad_tag') and self.ad_tag:
                ad_fetcher = AdFetcher(self.ad_tag)
                for name, type in self.ad_config.iteritems():
                    ads[name] = ad_fetcher.fetch_type(type)

            page = template.render(ads=ads, date=date, data=self.additional_template_data(), **trail_blocks)
            self.cache.add(cache_key, page, 300)
        else:
            logging.debug('Cache hit with key: %s' % cache_key)

        self.response.out.write(page)
Example #6
    def schema_shell(self):
        """Performs the 'schema-shell' command."""
        schema_shell_home = self.env.get(SCHEMA_SHELL_HOME)
        assert (schema_shell_home is not None), \
            ("Environment variable undefined: %r" % SCHEMA_SHELL_HOME)
        assert os.path.isdir(schema_shell_home), \
            ("Invalid home directory for KijiSchema shell: %r" % schema_shell_home)
        schema_shell_script = os.path.join(schema_shell_home, "bin", "kiji-schema-shell")
        assert os.path.isfile(schema_shell_script), \
            ("KijiSchema shell not found: %r" % schema_shell_script)

        env = dict(self.env)

        classpath = env.get(KIJI_CLASSPATH, "").split(":") + list(self.express.get_classpath())
        env[KIJI_CLASSPATH] = ":".join(classpath)

        java_opts = env.get("JAVA_OPTS", "")
        # FIXME: I cannot find any trace of the Java system property "express.tmpjars"!
        # java_opts += (" -Dexpress.tmpjars=%s" % ???)

        # Relevant for KijiSchema 1.1 only and will be removed in Express 3.0:
        java_opts += " -Dorg.kiji.schema.impl.AvroCellEncoder.SCHEMA_VALIDATION=DISABLED"
        env["JAVA_OPTS"] = java_opts

        cmd = [schema_shell_script]
        logging.debug("Launching kiji-schema shell with:\n%s\with KIJI_CLASSPATH:\n%s",
                      " \\\n\t".join(map(repr, cmd)), "\n".join(map(tab_indent, classpath)))
        logging.debug("Computed KIJI_CLASSPATH:")
        proc = subprocess.Popen(cmd, env=env)
        try:
            return proc.wait()
        except subprocess.SubprocessError:
            proc.kill()
Example #7
def setup_logging(log_level):
  """Initializes the logging system.

  Args:
    log_level: Logging level.
  """
  global _LOGGING_INITIALIZED
  if _LOGGING_INITIALIZED:
    logging.debug("setup_logging: logging system already initialized")
    return

  log_formatter = logging.Formatter(
      fmt="%(asctime)s %(levelname)s %(filename)s:%(lineno)s : %(message)s",
  )

  # Override the log date formatter to include the time zone:
  def format_time(record, datefmt=None):
    time_tuple = time.localtime(record.created)
    tz_name = time.tzname[time_tuple.tm_isdst]
    return "%(date_time)s-%(millis)03d-%(tz_name)s" % dict(
        date_time=time.strftime("%Y%m%d-%H%M%S", time_tuple),
        millis=record.msecs,
        tz_name=tz_name,
    )
  log_formatter.formatTime = format_time

  logging.root.handlers.clear()
  logging.root.setLevel(log_level)

  console_handler = logging.StreamHandler()
  console_handler.setFormatter(log_formatter)
  console_handler.setLevel(log_level)
  logging.root.addHandler(console_handler)

  _LOGGING_INITIALIZED = True
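A minimal usage sketch (the module-level guard the function references must be defined alongside it):

_LOGGING_INITIALIZED = False

if __name__ == "__main__":
  setup_logging(logging.DEBUG)
  logging.debug("logging configured")  # now timestamped with millis and time zone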
Example #8
    def jar(self):
        """Performs the 'jar' command."""
        class_name = getattr(self.flags, "class")
        if (class_name is None) and (len(self.args) > 0):
            class_name = self.pop_args_head()
        assert (class_name is not None), ("No class name specified with [--class=]<class>.")

        lib_jars = []
        if self.flags.jars is not None:
            lib_jars.extend(self.flags.jars.split(","))
        classpath = list(self.express.get_classpath(lib_jars=lib_jars))

        java_opts = []
        if self.flags.java_opts is not None:
            java_opts = [self.flags.java_opts]

        user_args = list(self.args)
        logging.info("Running java class %r with parameters: %r", class_name, user_args)

        cmd = [
            "java",
            # This property is only needed in kiji-schema v1.1 :
            "-Dorg.kiji.schema.impl.AvroCellEncoder.SCHEMA_VALIDATION=DISABLED",
        ] + java_opts + [
            "-classpath", ":".join(classpath),
            class_name,
        ] + user_args

        logging.debug("Running command:\n%s\n", " \\\n\t".join(map(repr, cmd)))
        return subprocess.call(cmd)
Example #9
    def process_request(self, request):
        # SESSION_KEY = '_auth_user_id'
        # BACKEND_SESSION_KEY = '_auth_user_backend'
        token = request.GET.get('token', False)
        token_id = request.GET.get('id', False)
        timestamp = request.GET.get('timestamp', False)
        if token and token_id and timestamp:
            logging.debug('SSO: user %s login attempt via SSO in with timestamp %s and token %s \n' % (
                token_id, timestamp, token))
            if self.check_token(token, token_id, timestamp):
                # everything passed, authenticate user
                logging.debug('SSO: user %s token and timestamp pass \n' % token_id)
                logging.debug('SSO: Attempting to authenticate as %s \n' % token_id)
                try:
                    user = self.authenticate(token_id)
                except Exception as e:
                    logging.debug('SSO: user %s does not exist, trying to create \n' % token_id)
                    rooibos_LDAP.new_account_from_ldap(token_id)
                    # re-authenticate after creating the account; otherwise
                    # 'user' is unbound in the check below
                    user = self.authenticate(token_id)

                if user.username == token_id:
                    try:
                        # THIS WAS THE KEY TO IT WORKING
                        user.backend = settings.SSO_BACKEND
                        login(request, user)
                        logging.debug('SSO: process_request - user.backend = %s' % user.backend)
                        #return None
                        #logging.debug(user.last_login)
                        # logging.debug(request.session['_auth_user_id'])
                        # logging.debug(request.session['_auth_user_backend'])
                    except Exception:
                        raise
Example #10
def getContestData(contestCode, expiryTime = None, writeInFile = None):

    logging.debug("In getContestData("+contestCode+')')
    expiryTime, writeInFile = getGlobals(expiryTime, writeInFile)

    data = {}

    if expiryTime > 0:
        data = checkInFile('contest/' + contestCode, expiryTime)
        if data is not None:
            return data
        else:
            data = {}

    URL = "https://www.codechef.com/api/contests/" + contestCode

    data = json.loads(requests.get(URL, headers={'User-Agent': 'Mozilla/5.0'}).text)

    #Make start_time and end_time keys directly in data
    data['start_time'] = data['time']['start']
    data['end_time'] = data['time']['end']

    #Removing unnecessary keys.
    keysToRemove = ['problems_data','time','problemsstats', 'user', 'announcements', 'rules', 'autoRefresh', 'banner', 'todos']
    data = removeKeys(data, keysToRemove)

    #From here too.
    for contest in data['problems']:
        data['problems'][contest] = removeKeys(data['problems'][contest], ['status_url','submit_url','problem_url','allow_submission'])

    if writeInFile:
        writeToFile('contest/' + contestCode, data)

    logging.debug("getContestData() = " + json.dumps(data, indent = 4))
    return data
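The removeKeys helper is not defined in the snippet; presumably it returns the dict stripped of the listed keys, along these lines:

def removeKeys(data, keysToRemove):
    """Return a copy of data without the given keys (missing keys are ignored)."""
    return {k: v for k, v in data.items() if k not in keysToRemove}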
Example #11
  def _LoadFromFile(self):
    """Read the version file and set the version components"""
    with open(self.version_file, 'r') as version_fh:
      for line in version_fh:
        if not line.strip():
          continue

        match = self.FindValue('CHROME_BRANCH', line)
        if match:
          self.chrome_branch = match
          logging.debug('Set the Chrome branch number to:%s',
                        self.chrome_branch)
          continue

        match = self.FindValue('CHROMEOS_BUILD', line)
        if match:
          self.build_number = match
          logging.debug('Set the build version to:%s', self.build_number)
          continue

        match = self.FindValue('CHROMEOS_BRANCH', line)
        if match:
          self.branch_build_number = match
          logging.debug('Set the branch version to:%s',
                        self.branch_build_number)
          continue

        match = self.FindValue('CHROMEOS_PATCH', line)
        if match:
          self.patch_number = match
          logging.debug('Set the patch version to:%s', self.patch_number)
          continue

    logging.debug(self.VersionString())
Example #12
  def PublishManifest(self, manifest, version, build_id=None):
    """Publishes the manifest as the manifest for the version to others.

    Args:
      manifest: Path to manifest file to publish.
      version: Manifest version string, e.g. 6102.0.0-rc4
      build_id: Optional integer giving build_id of the build that is
                publishing this manifest. If specified and non-negative,
                build_id will be included in the commit message.
    """
    # Note: This commit message is used by master.cfg for figuring out when to
    #       trigger slave builders.
    commit_message = 'Automatic: Start %s %s %s' % (self.build_names[0],
                                                    self.branch, version)
    if build_id is not None and build_id >= 0:
      commit_message += '\nCrOS-Build-Id: %s' % build_id

    logging.info('Publishing build spec for: %s', version)
    logging.info('Publishing with commit message: %s', commit_message)
    logging.debug('Manifest contents below.\n%s', osutils.ReadFile(manifest))

    # Copy the manifest into the manifest repository.
    spec_file = '%s.xml' % os.path.join(self.all_specs_dir, version)
    osutils.SafeMakedirs(os.path.dirname(spec_file))

    shutil.copyfile(manifest, spec_file)

    # Actually push the manifest.
    self.PushSpecChanges(commit_message)
Example #13
 def __init__(self, openid_, issued, attrs=None, sreg_=None):
     logging.debug('init janrain openid object')
     self.openid = openid_
     self.issued = issued
     self.attrs = attrs or {}
     self.sreg = sreg_ or {}
     self.is_iname = (xri.identifierScheme(openid_) == 'XRI')
Example #14
 def run_and_check_vcpupin(vm, vm_ref, vcpu, cpu_list, options):
     """
     Run the vcpupin command and then check the result.
     """
     if vm_ref == "name":
         vm_ref = vm.name
     elif vm_ref == "uuid":
         vm_ref = vm.get_uuid()
     # Execute virsh vcpupin command.
     cmdResult = virsh.vcpupin(vm_ref, vcpu, cpu_list, options, debug=True)
     if cmdResult.exit_status:
         if not status_error:
             # Command fail and it is in positive case.
             raise error.TestFail(cmdResult)
         else:
             # Command fail and it is in negative case.
             return
     else:
         if status_error:
             # Command success and it is in negative case.
             raise error.TestFail(cmdResult)
         else:
             # Command success and it is in positive case.
             # "--config" will take effect after VM destroyed.
             pid = None
             vcpu_pid = None
             if options == "--config":
                 virsh.destroy(vm.name)
             else:
                 pid = vm.get_pid()
                 logging.debug("vcpus_pid: %s", vm.get_vcpus_pid())
                 vcpu_pid = vm.get_vcpus_pid()[vcpu]
             # Check the result of vcpupin command.
             check_vcpupin(vm.name, vcpu, cpu_list, pid, vcpu_pid)
Example #15
  def test_restart(self):
    """test_restart tests that when starting a second vttablet with the same
    configuration as another one, it will kill the previous process
    and take over listening on the socket.

    If vttablet listens to other ports (like gRPC), this feature will
    break. We believe it is not widely used, so we're OK with this for now.
    (container based installations usually handle tablet restarts
    by using a different set of servers, and do not rely on this feature
    at all).
    """
    if environment.topo_server().flavor() != 'zookeeper':
      logging.info("Skipping this test in non-github tree")
      return
    if tablet_62344.grpc_enabled():
      logging.info("Skipping this test as second gRPC port interferes")
      return

    utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])

    # create the database so vttablets start, as it is serving
    tablet_62344.create_db('vt_test_keyspace')

    tablet_62344.init_tablet('master', 'test_keyspace', '0')
    proc1 = tablet_62344.start_vttablet()
    proc2 = tablet_62344.start_vttablet()
    for timeout in xrange(20):
      logging.debug("Sleeping waiting for first process to die")
      time.sleep(1.0)
      proc1.poll()
      if proc1.returncode is not None:
        break
    if proc1.returncode is None:
      self.fail("proc1 still running")
    tablet_62344.kill_vttablet()
Example #16
def graph_user(user, depth=0):
    logging.debug("Searching for %s", user)
    logging.debug("At depth %d", depth)
    followers = following = []
    result = [x.value for x in db.query(select_user % user)]

    if result:
        result = result.pop()
        following = result['following']
        followers = result['followers']

    if not GH.has_node(user):
        logging.debug("Adding %s to graph", user)
        GH.add_node(user)

    for follower in followers:
        if not GH.has_node(follower):
            GH.add_node(follower)
            logging.debug("Adding %s to graph", follower)
            if depth < max_depth:
                graph_user(follower, depth + 1)
        GH.add_edge(follower, user, {'weight': 2})

    for follow in following:
        if not GH.has_node(follow):
            GH.add_node(follow)
            logging.debug("Adding %s to graph", follow)
            if depth < max_depth:
                graph_user(follow, depth + 1)

        if GH.has_edge(follow, user):
            GH[follow][user]['weight'] += 1
        else:
            GH.add_edge(user, follow, {'weight': 1})
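graph_user leans on module state the snippet does not show; a sketch of plausible definitions (the graph and depth limit are inferred from usage, the query string is purely hypothetical):

import networkx as nx

GH = nx.DiGraph()   # follower graph built up recursively
max_depth = 2       # stop expanding beyond this depth
select_user = "select * from users where name = '%s'"  # hypothetical db view/query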
Example #17
 def execute(self, email_models):
     logging.debug("In Destiny::execute()")
     if not email_models:
         return
     emails_id = []
     destinations = {}
     for destiny in self._plugins.keys():
         destinations.setdefault(destiny, email_models[-1].get(destiny))
         emails_id.append(email_models[-1].email_id())
     for email_model in email_models[:-1]:
         for destiny in self._plugins.keys():
             d_tables = destinations.get(destiny).get("tables")
             for d_table in d_tables:
                 for k, v in d_table.iteritems():
                     m_tables = email_model.get(destiny).get("tables")
                     for m_table in m_tables:
                         if k in m_table:
                             d_table.setdefault(k, []).extend(m_table[k])
         emails_id.append(email_model.email_id())
     for destiny, models in destinations.iteritems():
         for forward in self._plugins.get(destiny):
             try:
                 forward.execute(models)
             except Exception, e:
                 logging.error("!! Error-execute: %s" % (str(e),))
                 logging.info("Add emails in queure error: %s" % str(emails_id))
                 for email_id in emails_id:
                     self.add_email_error(email_id)
                 continue
Example #18
def refetch(source):
  """Refetch the author's URLs and look for new or updated syndication
  links that might not have been there the first time we looked.

  Args:
    source: models.Source subclass. Changes to property values (e.g. domains,
      domain_urls, last_syndication_url) are stored in source.updates; they
      should be updated transactionally later.

  Returns:
    a dict of syndicated_url to a list of new models.SyndicatedPosts
  """
  if not source.updates:
    source.updates = {}

  logging.debug('attempting to refetch h-feed for %s', source.label())
  results = {}
  for url in _get_author_urls(source):
    results.update(_process_author(source, url, refetch=True))

  now = util.now_fn()
  logging.debug('updating source last_hfeed_fetch %s', now)
  source.updates['last_hfeed_fetch'] = now

  return results
Example #19
    def call(self, function, params=None):
        self.requestPerMinute += 1
        now = datetime.utcnow()

        if self.requestPerMinute >= self.requestLimit:
            waittime = 60 - now.second
            logging.warning("Limit for request per minute exceeded. Waiting for: {0} sec.".format(waittime))
            time.sleep(waittime)
            now = datetime.utcnow()

        if self.checkMinute != now.minute:
            self.requestPerMinute = 0
            self.checkMinute = now.minute

        payload = ''
        try:
            p = "" if not params else '?' + "&".join(
                ["{key}={value}".format(key=k, value=v) for (k, v) in params.iteritems()])
            url = "{base}.{func}{params}".format(base=self.baseConfig["url"], func=function, params=p)
            logging.debug("{0} {1} API call:{2}".format(self.checkMinute, self.requestPerMinute, url))
            request = urllib2.Request(url, None, self.baseConfig["headers"])
            stream = urllib2.urlopen(request)
            payload = stream.read()
            data = json.loads(payload)
            if isinstance(data, dict) and 'ruid' in data:
                logging.error('Api call failed with error: {0} Code: {1}'.format(data['message'], data['code']))
                return None
            return data

        except Exception as e:
            logging.error('Error: {0} Context: {1}'.format(e, payload))
            return None
Example #20
def make_parser():
    """ Construct the command line parser """
    logging.info("Constructing parser")
    description = "Store and retrieve snippets of text"
    parser = argparse.ArgumentParser(description=description)

    subparsers = parser.add_subparsers(help="Available commands")

    # Subparser for the put command
    logging.debug("Constructing put subparser")
    put_parser = subparsers.add_parser("put", help="Store a snippet")
    put_parser.add_argument("name", help="The name of the snippet")
    put_parser.add_argument("snippet", help="The snippet text")
    put_parser.add_argument("filename", default="snippets.csv", nargs="?",
                            help="The snippet filename")
    put_parser.set_defaults(command="put")

    # Subparser for the get command
    logging.debug("Constructing get subparser")
    put_parser = subparsers.add_parser("get", help="Retrieve a snippet")
    put_parser.add_argument("name", help="The name of the snippet")
    put_parser.add_argument("filename", default="snippets.csv", nargs="?",
                            help="The snippet filename")
    put_parser.set_defaults(command="get")

    return parser
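A typical driver for this parser (a sketch; the original module's main() is not shown):

def main():
    parser = make_parser()
    arguments = vars(parser.parse_args())
    command = arguments.pop("command", None)
    logging.debug("Dispatching %r with arguments %r", command, arguments)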
Example #21
    def upsert_prelude_project(self,path=None,filename=None,uuid=None,version=None,nclips=None):
        cursor=self.conn.cursor()
        self.conn.commit()

        #if uuid is None:
        #    raise DataError("You need to pass a valid uuid")

        #does the project entry already exist? If so leave it (avoiding database bloat)
        cursor.execute("SELECT id FROM prelude_projects WHERE filename=%s AND filepath=%s", (filename, path, ))
        result = cursor.fetchone()
        if result is not None:
            logging.debug("Prelude project {0}/{1} already exists in database, not touching it".format(path, filename))
            return result[0]

        try:
            sqlcmd = """insert into prelude_projects (filepath,filename,uuid,version,clips,lastseen)
                        values (%s,%s,%s,%s,%s,now()) returning id"""
            cursor.execute(sqlcmd,(path,filename,uuid,version,nclips))
        except psycopg2.IntegrityError as e: #if we violate unique keys, try to update on filename
            self.conn.rollback()
            try:
                sqlcmd = """update prelude_projects set filepath=%s, filename=%s, uuid=%s, version=%s, clips=%s, lastseen=now()
                            where filepath=%s and filename=%s returning id"""
                cursor.execute(sqlcmd,(path,filename,uuid,version,nclips,path,filename))
            except psycopg2.IntegrityError as e: #if that causes a violation, try to update on uuid
                self.conn.rollback()
                sqlcmd = """update prelude_projects set filepath=%s, filename=%s, uuid=%s, version=%s, clips=%s, lastseen=now()
                            where uuid=%s returning id"""
                cursor.execute(sqlcmd,(path,filename,uuid,version,nclips,uuid))

        self.conn.commit()
        result=cursor.fetchone()
        return result[0]    #return id of inserted row
Example #22
  def execute(self):
    targets = self.get_targets(self.is_non_synthetic_python_target)
    with self.invalidated(targets=targets) as invalidation_check:
      if not invalidation_check.invalid_vts:
        logging.debug(self.NOOP_MSG_HAS_TARGET_BUT_NO_SOURCE)
        return

      invalid_tgts = [vt.target for vt in invalidation_check.invalid_vts]
      sources = self._calculate_isortable_python_sources(invalid_tgts)
      if not sources:
        logging.debug(self.NOOP_MSG_HAS_TARGET_BUT_NO_SOURCE)
        return

      isort = self.context.products.get_data(IsortPrep.Isort)
      args = self.get_passthru_args() + sources

      # NB: We execute isort out of process to avoid unwanted side-effects from importing it:
      #   https://github.com/timothycrosley/isort/issues/456
      with pushd(get_buildroot()):
        workunit_factory = functools.partial(self.context.new_workunit,
                                             name='run-isort',
                                             labels=[WorkUnitLabel.TOOL, WorkUnitLabel.LINT])
        cmdline, exit_code = isort.run(workunit_factory, args)
        if exit_code != 0:
          raise TaskError('{} ... exited non-zero ({}).'.format(cmdline, exit_code),
                          exit_code=exit_code)
Example #23
    def __public_response(self, messages):
        message = None
        
        for m in messages:
            pretty_message = "%s [%s %s] %s" % (m.id, m.created_at, m.user.screen_name, m.text)
            logging.info("found public message: %s" % pretty_message)
            if not self.__analyzer.should_respond(m):
                logging.info("not responding")
                continue
                
            response = TwitterResponseAccessor.get_by_message_id(str(m.id))
            if not response:
                message = m
                break
            else:
                logging.debug("found response to public message %s" % m.id)
        
        sent_message = None
        if message:
            # TODO: search for username also
            username = message.user.screen_name
            parsed_tweet = parse_tweet(message.text)
            plain_tweet = parsed_tweet.plain_text
            speaker = self.__select_speaker()
            sources, mix = Mixer(speaker).mix_response(plain_tweet, min_results=1, max_length=130-len(username))
            response_text = "@%s %s" % (username, mix)
            logging.info("responding to public message %s: %s" % (message.id, response_text))
            
            sent_message = self.__twitter.PostUpdate(response_text, message.id)
            TwitterResponseAccessor.create(str(message.id), response_id=str(sent_message.id), user=username, tweet_type=TwitterResponse.MENTION) 
            self.__reporter.posted(response_text)

        return sent_message
Example #24
 def act(self, force_act=False, action=None, skip_responses=False):
     """
     returns:
         (action, response) tuple.  response type depends on the action that was performed.
     """        
     if not force_act:
         config = ConfigurationAccessor.get_or_create()
         if config and (config.is_tweeting is not None) and (not safe_int(config.is_tweeting)):
             logging.debug("config.is_tweeting is False; hiding")
             return ()
     
     result = []
     responded = False
     if not skip_responses:
         try:
             direct, response = self.respond()
             if (direct or response):
                 # a response to a direct message or mention was generated
                 responded = True
                 if direct:
                     result.append(direct.AsDict())
                 if response:
                     result.append(response.AsDict())
         except Exception, e:
             logging.error(e)
Example #25
 def test_rollback(self):
    return  # note: this early return skips the entire test body below
   try:
     vtgate_conn = get_connection()
     count = 10
     vtgate_conn.begin()
     vtgate_conn._execute(
         "delete from vt_insert_test", {},
         KEYSPACE_NAME, 'master',
         keyranges=[self.keyrange])
     kid_list = shard_kid_map[shard_names[self.shard_index]]
     for x in xrange(count):
       keyspace_id = kid_list[count%len(kid_list)]
       vtgate_conn._execute(
           "insert into vt_insert_test (msg, keyspace_id) values (%(msg)s, %(keyspace_id)s)",
           {'msg': 'test %s' % x, 'keyspace_id': keyspace_id},
           KEYSPACE_NAME, 'master', keyspace_ids=[pack_kid(keyspace_id)])
     vtgate_conn.commit()
     vtgate_conn.begin()
     vtgate_conn._execute(
         "delete from vt_insert_test", {},
         KEYSPACE_NAME, 'master',
         keyranges=[self.keyrange])
     vtgate_conn.rollback()
     results, rowcount = vtgate_conn._execute(
         "select * from vt_insert_test", {},
         KEYSPACE_NAME, 'master',
         keyranges=[self.keyrange])[:2]
     logging.debug("ROLLBACK TEST rowcount %d count %d" % (rowcount, count))
     self.assertEqual(rowcount, count, "Fetched rows(%d) != inserted rows(%d), rollback didn't work" % (rowcount, count))
   except Exception, e:
     logging.debug("Write failed with error %s" % str(e))
     raise
Example #26
def restore_zk(filename):
  """ Restores Zookeeper data from a fixed file in the local FS.

  Args:
    filename: A str, the path to the temporary Zookeeper backup file.
  """
  handle = kazoo.client.KazooClient(hosts=ZK_DEFAULT_HOST)
  handle.start()
  with open(filename, 'r') as f:
    for line in f.readlines():
      pair = json.loads(line)
      path = pair.keys()[0]
      value = pair.values()[0].decode('base64')
      try:
        handle.create(path, bytes(value), makepath=True)
        logging.debug("Created '{0}'".format(path))
      except kazoo.exceptions.NodeExistsError:
        try:
          handle.set(path, bytes(value))
          logging.debug("Updated '{0}'".format(path))
        except kazoo.exceptions.BadArgumentsError:
          logging.warning("BadArgumentsError for path '{0}'".format(path))
      except kazoo.exceptions.NoNodeError:
        logging.warning("NoNodeError for path '{0}'. Parent nodes are "
          "missing".format(path))
      except kazoo.exceptions.ZookeeperError:
        logging.warning("ZookeeperError for path '{0}'".format(path))
  handle.stop()
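restore_zk expects one JSON object per line, each mapping a znode path to a base64-encoded value. A sketch of the matching dump routine (hypothetical; walk() stands in for whatever recursive traversal the real backup code uses):

def dump_zk(filename):
  """Writes Zookeeper data to a file, one {path: base64-value} JSON per line."""
  handle = kazoo.client.KazooClient(hosts=ZK_DEFAULT_HOST)
  handle.start()
  with open(filename, 'w') as f:
    for path in walk(handle, '/'):  # assumed helper yielding every znode path
      value, _ = handle.get(path)
      f.write(json.dumps({path: value.encode('base64')}) + '\n')
  handle.stop()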
Example #27
def process_users(users_file, keytab_path):

    with open(users_file) as csvfile:
        r = csv.DictReader(csvfile, delimiter=',')
        for row in r:
            if row['Name'] == '#':
                continue
            d = {
                'username': row['Name'],
                'description': row['Description'],
                'first': row['Name'],
                'last': row['Name'],
                'realm': REALM,
                'ipa_server': IPA_SERVER,
                'path_prefix': keytab_path
            }

            if not user_exists(d['username']):
                create_user(d)
                # When a user is created in IPA, their password auto-expires.
                # We need to explicitly log in or set the expiration to a later
                # date so that the keytab can be retrieved and used later.
                change_krb_expiration(d['username'])                    
                create_keytab(d)
                for g in BASE_GROUPS:
                    add_group(d['username'], g)
            else:
                logging.debug('User %s already exists' % d['username'])
Example #28
 def apply(self, ui):
     logging.debug(self.movement)
     if self.movement == 'last':
         ui.current_buffer.focus_last()
         ui.update()
     else:
         MoveCommand.apply(self, ui)
Example #29
File: age.py Project: xuanblo/jcvi
def traits(args):
    """
    %prog traits directory

    Make HTML page that reports eye and skin color.
    """
    p = OptionParser(traits.__doc__)
    opts, args = p.parse_args(args)

    if len(args) < 1:
        sys.exit(not p.print_help())

    samples = []
    for folder in args:
        targets = iglob(folder, "*-traits.json")
        if not targets:
            continue
        filename = targets[0]
        js = json.load(open(filename))
        js["skin_rgb"] = make_rgb(
            js["traits"]["skin-color"]["L"],
            js["traits"]["skin-color"]["A"],
            js["traits"]["skin-color"]["B"])
        js["eye_rgb"] = make_rgb(
            js["traits"]["eye-color"]["L"],
            js["traits"]["eye-color"]["A"],
            js["traits"]["eye-color"]["B"])
        samples.append(js)

    template = Template(traits_template)
    fw = open("report.html", "w")
    print >> fw, template.render(samples=samples)
    logging.debug("Report written to `{}`".format(fw.name))
    fw.close()
Example #30
    def upsert_edit_project(self,filepath,filename,uuid,version,desc=None,opens_with=None):
        cursor = self.conn.cursor()

        matches=re.search(u'(\.[^\.]+)$',filename)
        file_xtn=""
        if matches is not None:
            file_xtn=str(matches.group(1))
        else:
            raise ArgumentError("Filename %s does not appear to have a file extension" % filename)

        typenum=self.project_type_for_extension(file_xtn,desc=desc,opens_with=opens_with)
        #does the project entry already exist? If so leave it (avoiding database bloat)
        cursor.execute("SELECT id FROM edit_projects WHERE filename=%s AND filepath=%s", (filename, filepath, ))
        result = cursor.fetchone()
        if result is not None:
            logging.debug("Edit project {0}/{1} already exists in database, not touching it".format(filepath, filename))
            return result[0]

        try:
            cursor.execute("insert into edit_projects (filename,filepath,type,lastseen,valid) values (%s,%s,%s,now(),true) returning id", (filename,filepath,typenum))
        except psycopg2.IntegrityError as e: #this is kept in case of race conditions
            self.conn.rollback()
            cursor.execute("update edit_projects set lastseen=now(), valid=true where filename=%s and filepath=%s returning id", (filename,filepath))

        result = cursor.fetchone()
        id = result[0]

        sqlcmd="update edit_projects set uuid=%s, version=%s where id=%s"
        cursor.execute(sqlcmd, (uuid,version,id))
        self.conn.commit()
        return id
Example #31
 def GetDevices(self, sender, connection):
     logging.debug("GetDevices")
     return self.devices
Example #32
def main():
    # Parse arguments and pass through unrecognised args
    parser = argparse.ArgumentParser(
        add_help=False,
        usage='%(prog)s [test_runner.py options] [script options] [scripts]',
        description=__doc__,
        epilog='''
    Help text and arguments for individual test script:''',
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument(
        '--combinedlogslen',
        '-c',
        type=int,
        default=0,
        help=
        'print a combined log (of length n lines) from all test nodes and test framework to the console on failure.'
    )
    parser.add_argument(
        '--coverage',
        action='store_true',
        help='generate a basic coverage report for the RPC interface')
    parser.add_argument(
        '--exclude',
        '-x',
        help='specify a comma-separated-list of scripts to exclude.')
    parser.add_argument(
        '--extended',
        action='store_true',
        help='run the extended test suite in addition to the basic tests')
    parser.add_argument(
        '--force',
        '-f',
        action='store_true',
        help=
        'run tests even on platforms where they are disabled by default (e.g. windows).'
    )
    parser.add_argument('--help',
                        '-h',
                        '-?',
                        action='store_true',
                        help='print help text and exit')
    parser.add_argument(
        '--jobs',
        '-j',
        type=int,
        default=4,
        help='how many test scripts to run in parallel. Default=4.')
    parser.add_argument(
        '--keepcache',
        '-k',
        action='store_true',
        help=
        'the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.'
    )
    parser.add_argument('--quiet',
                        '-q',
                        action='store_true',
                        help='only print results summary and failure logs')
    parser.add_argument('--tmpdirprefix',
                        '-t',
                        default=tempfile.gettempdir(),
                        help="Root directory for datadirs")
    args, unknown_args = parser.parse_known_args()

    # args to be passed on always start with two dashes; tests are the remaining unknown args
    tests = [arg for arg in unknown_args if arg[:2] != "--"]
    passon_args = [arg for arg in unknown_args if arg[:2] == "--"]

    # Read config generated by configure.
    config = configparser.ConfigParser()
    configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
    config.read_file(open(configfile))

    passon_args.append("--configfile=%s" % configfile)

    # Set up logging
    logging_level = logging.INFO if args.quiet else logging.DEBUG
    logging.basicConfig(format='%(message)s', level=logging_level)

    # Create base test directory
    tmpdir = "%s/underline_test_runner_%s" % (
        args.tmpdirprefix, datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
    os.makedirs(tmpdir)

    logging.debug("Temporary test directory at %s" % tmpdir)

    enable_wallet = config["components"].getboolean("ENABLE_WALLET")
    enable_utils = config["components"].getboolean("ENABLE_UTILS")
    enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")

    if config["environment"]["EXEEXT"] == ".exe" and not args.force:
        # https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
        # https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
        print(
            "Tests currently disabled on Windows by default. Use --force option to enable"
        )
        sys.exit(0)

    if not (enable_wallet and enable_utils and enable_bitcoind):
        print(
            "No functional tests to run. Wallet, utils, and underlined must all be enabled"
        )
        print(
            "Rerun `configure` with -enable-wallet, -with-utils and -with-daemon and rerun make"
        )
        sys.exit(0)

    # Build list of tests
    if tests:
        # Individual tests have been specified. Run specified tests that exist
        # in the ALL_SCRIPTS list. Accept the name with or without .py extension.
        tests = [re.sub(r"\.py$", "", t) + ".py" for t in tests]
        test_list = []
        for t in tests:
            if t in ALL_SCRIPTS:
                test_list.append(t)
            else:
                print("{}WARNING!{} Test '{}' not found in full test list.".
                      format(BOLD[1], BOLD[0], t))
    else:
        # No individual tests have been specified.
        # Run all base tests, and optionally run extended tests.
        test_list = BASE_SCRIPTS
        if args.extended:
            # place the EXTENDED_SCRIPTS first since the three longest ones
            # are there and the list is shorter
            test_list = EXTENDED_SCRIPTS + test_list

    # Remove the test cases that the user has explicitly asked to exclude.
    if args.exclude:
        tests_excl = [
            re.sub("\.py$", "", t) + ".py" for t in args.exclude.split(',')
        ]
        for exclude_test in tests_excl:
            if exclude_test in test_list:
                test_list.remove(exclude_test)
            else:
                print("{}WARNING!{} Test '{}' not found in current test list.".
                      format(BOLD[1], BOLD[0], exclude_test))

    if not test_list:
        print(
            "No valid test scripts specified. Check that your test is in one "
            "of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests"
        )
        sys.exit(0)

    if args.help:
        # Print help for test_runner.py, then print help of the first script (with args removed) and exit.
        parser.print_help()
        subprocess.check_call([(config["environment"]["SRCDIR"] +
                                '/test/functional/' +
                                test_list[0].split()[0])] + ['-h'])
        sys.exit(0)

    check_script_list(config["environment"]["SRCDIR"])
    check_script_prefixes()

    if not args.keepcache:
        shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"],
                      ignore_errors=True)

    run_tests(test_list, config["environment"]["SRCDIR"],
              config["environment"]["BUILDDIR"],
              config["environment"]["EXEEXT"], tmpdir, args.jobs,
              args.coverage, passon_args, args.combinedlogslen)
Example #33
    def extract_arguments(self, method_arg):
        i = 0
        args = []
        isArray = False
        while i < len(method_arg):
            # class or interface
            if method_arg[i] == "L":
                if method_arg[i - 1] == "[":
                    isArray = True

                # check array
                if isArray:
                    args.append('[L' + method_arg[i + 1:method_arg.find(';')] +
                                ';')
                    isArray = False
                else:
                    args.append(method_arg[i + 1:method_arg.find(';')])

                logging.debug(' ' + method_arg[i + 1:method_arg.find(';')])

                i = method_arg.find(';') + 1
                method_arg = method_arg.replace(';', ' ', 1)

                continue

            # Int
            if method_arg[i] == "I":
                if method_arg[i - 1] == "[":
                    isArray = True

                if isArray:
                    args.append('[I')
                    isArray = False
                else:
                    args.append('int')

            # Boolean
            if method_arg[i] == "Z":
                if method_arg[i - 1] == "[":
                    isArray = True

                if isArray:
                    args.append('[Z')
                    isArray = False
                else:
                    args.append('boolean')

            # Float
            if method_arg[i] == "F":
                if method_arg[i - 1] == "[":
                    isArray = True

                if isArray:
                    args.append('[F')
                    isArray = False
                else:
                    args.append('float')

            # Long
            if method_arg[i] == "J":
                if method_arg[i - 1] == "[":
                    isArray = True

                if isArray:
                    args.append('[J')
                    isArray = False
                else:
                    args.append('long')

            # Double
            if method_arg[i] == "D":
                if method_arg[i - 1] == "[":
                    isArray = True

                if isArray:
                    args.append('[D')
                    isArray = False
                else:
                    args.append('double')

            # Char
            if method_arg[i] == "C":
                if method_arg[i - 1] == "[":
                    isArray = True

                if isArray:
                    args.append('[C')
                    isArray = False
                else:
                    args.append('char')

            # Byte
            if method_arg[i] == "B":
                if method_arg[i - 1] == "[":
                    isArray = True

                if isArray:
                    args.append('[B')
                    isArray = False
                else:
                    args.append('byte')

            # Short
            if method_arg[i] == "S":
                if method_arg[i - 1] == "[":
                    isArray = True

                if isArray:
                    args.append('[S')
                    isArray = False
                else:
                    args.append('short')

            i += 1

        return args
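The repeated if-blocks above could be collapsed into a descriptor table; a behavior-preserving sketch for the primitive cases (class and array-of-class descriptors would keep the 'L' branch shown above):

    # JVM primitive type descriptors and their Java names.
    PRIMITIVES = {
        'I': 'int', 'Z': 'boolean', 'F': 'float', 'J': 'long',
        'D': 'double', 'C': 'char', 'B': 'byte', 'S': 'short',
    }

    def decode_primitive(self, descriptor, i):
        """Decode descriptor[i], keeping JVM array notation for '['-prefixed args."""
        ch = descriptor[i]
        if ch not in self.PRIMITIVES:
            return None
        if i > 0 and descriptor[i - 1] == '[':
            return '[' + ch  # e.g. '[I' for int[]
        return self.PRIMITIVES[ch]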
Example #34
    def create_javascript(self, smali_file_path):

        javascript = "Java.perform(function() {\n"

        with open(smali_file_path, 'r') as smali_file:

            # JavaScript
            logging.debug(' ' + smali_file_path + '\n')

            # read by line
            smali_file_lines = smali_file.readlines()

            for smali_file_line in smali_file_lines:
                # find class name
                if smali_file_line.split(' ')[0] == ".class":
                    # get full class name
                    smali_file_line = smali_file_line.split(' ')
                    smali_file_line.reverse()
                    full_class_name = smali_file_line[0][1:]
                    full_class_name = full_class_name[:len(full_class_name) -
                                                      2].replace('/', '.')

                    logging.debug(" full_class_name: " + full_class_name)

                    # get class name
                    class_name = full_class_name.split('.')
                    class_name.reverse()
                    class_name = class_name[0]

                    logging.debug(" class_name: " + class_name)

                    # declare class
                    javascript += '    var ' + class_name + ' = Java.use(\'' + full_class_name + '\');\n\n'
                    continue

                # find .method
                if smali_file_line.split(' ')[0] == ".method":
                    # constructor hook
                    if 'constructor' in smali_file_line and \
                     not 'static' in smali_file_line:

                        args = smali_file_line.split(' ').pop()
                        args = args[args.find('(') + 1:args.rfind(')')]
                        args_list = self.extract_arguments(args)
                        #print(args_list)
                        args_len = len(args_list)

                        args_string = ''
                        args_quota_added = []
                        for i in range(args_len):
                            # replace / to .
                            args_list[i] = args_list[i].replace('/', '.')
                            args_quota_added.append('\'' + args_list[i] + '\'')

                            # arg string create ex) arg0,arg1
                            if i == args_len - 1:
                                args_string += 'arg' + str(i)
                            else:
                                args_string += 'arg' + str(i) + ','

                        javascript += '    ' + class_name + '.$init.overload(' + ','.join(
                            args_quota_added
                        ) + ').implementation = function(' + args_string + ') {\n'
                        javascript += '        console.log(\'[Constructor] ' + full_class_name + '(' + ','.join(
                            args_list) + ')\');\n'
                        for i in range(args_len):
                            javascript += '        console.log(\'    [arg' + str(
                                i) + '] \' + arg' + str(i) + ');\n'
                        javascript += '        return this.$init(' + args_string + ');\n'
                        javascript += '    };\n\n'

                    # method hook
                    elif not 'constructor' in smali_file_line:
                        logging.debug("")
                        method_line_list = smali_file_line.split(' ')
                        method_line_list.reverse()
                        method_line = method_line_list[0]

                        # extract method
                        method_name = method_line.split('(')[0]
                        method_arg = method_line.split('(')[1].split(')')[0]
                        method_ret_type = method_line.split(')')[1].split(
                            '\n')[0]

                        logging.debug(" method_name: " + method_name)
                        logging.debug(" method_arg: " + method_arg)
                        logging.debug(" method_ret_type: " + method_ret_type)

                        # args extract
                        if len(method_arg) == 0:  # args is not exist
                            javascript += '    ' + class_name + '.' + method_name + '.overload().implementation = function(){\n'
                            javascript += '        console.log(\'[Method] ' + full_class_name + '.' + method_name + '()\');\n'

                            # create retval
                            javascript += '        var retval = this.' + method_name + '();\n'

                            # logging.info return
                            if method_ret_type != 'V':
                                javascript += '        console.log(\'    [ret] \' + retval);\n'

                            # return method
                            javascript += '        return retval;\n'

                            javascript += '    };\n\n'
                            continue

                        else:  # args exist
                            args_list = self.extract_arguments(method_arg)

                            logging.debug(" args_list: " + str(args_list))

                            args_string = ''
                            args_len = len(args_list)
                            args_quota_added = []

                            for i in range(args_len):
                                # replace / to .
                                args_list[i] = args_list[i].replace('/', '.')
                                args_quota_added.append('\'' + args_list[i] +
                                                        '\'')
                                # arg string create
                                args_string += 'arg' + str(i)
                                # if last arg
                                if i != args_len - 1:
                                    args_string += ','

                        javascript += '    ' + class_name + '.' + method_name + '.overload(' + ','.join(
                            args_quota_added
                        ) + ').implementation = function(' + args_string + '){\n'

                        # print hook method name
                        javascript += '        console.log(\'[Method] ' + full_class_name + '.' + method_name + '(' + ','.join(
                            args_list) + ')\');\n'

                        # print args
                        for i in range(args_len):
                            javascript += '        console.log(\'    [arg' + str(
                                i) + '] \' + arg' + str(i) + ');\n'

                        # create retval
                        javascript += '        var retval = this.' + method_name + '(' + args_string + ');\n'

                        # print ret
                        if method_ret_type != 'V':
                            javascript += '        console.log(\'    [ret] \' + retval);\n'

                        # return method
                        javascript += '        return retval;\n'

                        javascript += '    };\n\n'

        javascript += "});\n"
        return javascript
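A possible driver for the generator above (the wrapper class name is hypothetical; the Frida CLI flags at the end are standard):

hooker = FridaScriptGenerator()  # assumed class exposing create_javascript()
script = hooker.create_javascript("decompiled/com/example/app/MainActivity.smali")
with open("hook.js", "w") as f:
    f.write(script)
# then attach, e.g.: frida -U -f com.example.app -l hook.js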
Example #35
        sys.exit(1)

    ssrcObj = Proxy(("PYRO:{}@{}:{}").format(
        SSRC.DAEMON.PYRO_OBJECT_ID,
        SSRC.DAEMON.PYRO_HOST,
        SSRC.DAEMON.PYRO_PORT))
    try:
        if(options.shutdown):
            ssrcObj.Exit()
        elif(options.status):
            print("{:.2f}".format(ssrcObj.Status()))
        else:
            if(options.in_band):
                if(options.hot):
                    ssrcObj.Adjust(SSRC.TemperatureState.WARM)
                    logging.debug(
                        "Adjust down 1% to {:.2f}%".format(ssrcObj.Status()))
                elif(options.cold):
                    ssrcObj.Adjust(SSRC.TemperatureState.COOL)
                    logging.debug(
                        "Adjust up 0.25% to {:.2f}%".format(ssrcObj.Status()))
                else:
                    ssrcObj.Adjust(SSRC.TemperatureState.PERFECT)
                    logging.debug(
                        "Stay at {:.2f}%".format(ssrcObj.Status()))
            else:
                if(options.hot):
                    ssrcObj.Adjust(SSRC.TemperatureState.HOT)
                    logging.debug(
                        "Adjust down 20% to {:.2f}%".format(ssrcObj.Status()))
                elif(options.cold):
                    ssrcObj.Adjust(SSRC.TemperatureState.COLD)
Example #36
import openpyxl, logging
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
from openpyxl.styles import Font
from openpyxl.utils import get_column_letter

wb = openpyxl.Workbook()
logging.debug(wb.worksheets)
wb.create_sheet(title='Tables',index=0)
logging.debug(wb.worksheets)
sheet = wb.worksheets[0]

#First row Frozen and BOLD
fontObj = Font(name='Times New Roman', bold=True)
for colNum in range(1,10):
    sheet.column_dimensions[get_column_letter(colNum)].width = 15
    sheet.cell(row=1, column=colNum).font = fontObj
    sheet.cell(row=1,column=colNum).value = "Number = " + str(colNum)
sheet = wb.active
sheet.freeze_panes = 'A2'

#Rest of the rows (tables upto 20)
for colNum in range(1,sheet.max_column+1):
    for rowNum in range(2,22):
        sheet.cell(row = rowNum, column = colNum).value = (rowNum-1)*colNum

#merge last row
sheet.merge_cells('A23:I23')

#SUM function for all the tables
for colNum in range(1,10):
    colAlpha = get_column_letter(colNum)
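    # (The listing breaks off here; presumably each column then gets a SUM
    # formula under its table, e.g., with rows 2-21 holding the values:)
    sheet.cell(row=22, column=colNum).value = '=SUM({0}2:{0}21)'.format(colAlpha)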
Example #37
File: nm.py Project: sdarses/Qt
def _SubMain(log_level, tool_prefix, output_directory):
  logging.basicConfig(
      level=int(log_level),
      format='nm: %(levelname).1s %(relativeCreated)6d %(message)s')
  bulk_analyzer = _BulkObjectFileAnalyzerWorker(tool_prefix, output_directory)
  while True:
    payload_len = int(sys.stdin.read(8) or '0', 16)
    if not payload_len:
      logging.debug('nm bulk subprocess received eof.')
      break
    paths = sys.stdin.read(payload_len).split('\x01')
    bulk_analyzer.AnalyzePaths(paths)

  bulk_analyzer.Close()
  paths_by_name = bulk_analyzer.Get()
  encoded_keys, encoded_values = concurrent.EncodeDictOfLists(paths_by_name)
  try:
    sys.stdout.write('%08x' % len(encoded_keys))
    sys.stdout.write(encoded_keys)
    sys.stdout.write(encoded_values)
  except IOError, e:
    # Parent process exited.
    if e.errno == errno.EPIPE:
      sys.exit(1)

  logging.debug('nm bulk subprocess finished.')


if __name__ == '__main__':
  _SubMain(*sys.argv[1:])
Example #38
def run_tests(test_list,
              src_dir,
              build_dir,
              exeext,
              tmpdir,
              jobs=1,
              enable_coverage=False,
              args=[],
              combined_logs_len=0):
    # Warn if bitcoind is already running (unix only)
    try:
        if subprocess.check_output(["pidof", "underlined"]) is not None:
            print(
                "%sWARNING!%s There is already a underlined process running on this system. Tests may fail unexpectedly due to resource contention!"
                % (BOLD[1], BOLD[0]))
    except (OSError, subprocess.SubprocessError):
        pass

    # Warn if there is a cache directory
    cache_dir = "%s/test/cache" % build_dir
    if os.path.isdir(cache_dir):
        print(
            "%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory."
            % (BOLD[1], BOLD[0], cache_dir))

    # Set env vars
    if "LITECOIND" not in os.environ:
        os.environ["LITECOIND"] = build_dir + '/src/underlined' + exeext
        os.environ["LITECOINCLI"] = build_dir + '/src/underline-cli' + exeext

    tests_dir = src_dir + '/test/functional/'

    flags = ["--srcdir={}/src".format(build_dir)] + args
    flags.append("--cachedir=%s" % cache_dir)

    if enable_coverage:
        coverage = RPCCoverage()
        flags.append(coverage.flag)
        logging.debug("Initializing coverage directory at %s" % coverage.dir)
    else:
        coverage = None

    if len(test_list) > 1 and jobs > 1:
        # Populate cache
        try:
            subprocess.check_output([tests_dir + 'create_cache.py'] + flags +
                                    ["--tmpdir=%s/cache" % tmpdir])
        except subprocess.CalledProcessError as e:
            sys.stdout.buffer.write(e.output)
            raise

    # Run tests
    job_queue = TestHandler(jobs, tests_dir, tmpdir, test_list, flags)
    time0 = time.time()
    test_results = []

    max_len_name = len(max(test_list, key=len))

    for _ in range(len(test_list)):
        test_result, testdir, stdout, stderr = job_queue.get_next()
        test_results.append(test_result)

        if test_result.status == "Passed":
            logging.debug(
                "\n%s%s%s passed, Duration: %s s" %
                (BOLD[1], test_result.name, BOLD[0], test_result.time))
        elif test_result.status == "Skipped":
            logging.debug("\n%s%s%s skipped" %
                          (BOLD[1], test_result.name, BOLD[0]))
        else:
            print("\n%s%s%s failed, Duration: %s s\n" %
                  (BOLD[1], test_result.name, BOLD[0], test_result.time))
            print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
            print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
            if combined_logs_len and os.path.isdir(testdir):
                # Print the final `combined_logs_len` lines of the combined logs
                print('{}Combine the logs and print the last {} lines ...{}'.
                      format(BOLD[1], combined_logs_len, BOLD[0]))
                print('\n============')
                print('{}Combined log for {}:{}'.format(
                    BOLD[1], testdir, BOLD[0]))
                print('============\n')
                combined_logs, _ = subprocess.Popen(
                    [
                        os.path.join(tests_dir, 'combine_logs.py'), '-c',
                        testdir
                    ],
                    universal_newlines=True,
                    stdout=subprocess.PIPE).communicate()
                print("\n".join(
                    deque(combined_logs.splitlines(), combined_logs_len)))

    print_results(test_results, max_len_name, (int(time.time() - time0)))

    if coverage:
        coverage.report_rpc_coverage()

        logging.debug("Cleaning up coverage data")
        coverage.cleanup()

    # Clear up the temp directory if all subdirectories are gone
    if not os.listdir(tmpdir):
        os.rmdir(tmpdir)

    all_passed = all(
        map(lambda test_result: test_result.was_successful, test_results))

    sys.exit(not all_passed)
Example #39
0
def init(args):
    encoding = args.get('--encoding')
    extra_ignore_dirs = args.get('--ignore')
    follow_links = not args.get('--no-follow-links')
    input_path = args['<path>']
    if input_path is None:
        input_path = os.path.abspath(os.curdir)

    if extra_ignore_dirs:
        extra_ignore_dirs = extra_ignore_dirs.split(',')

    candidates = get_all_imports(input_path,
                                 encoding=encoding,
                                 extra_ignore_dirs=extra_ignore_dirs,
                                 follow_links=follow_links)
    candidates = get_pkg_names(candidates)
    logging.debug("Found imports: " + ", ".join(candidates))
    pypi_server = "https://pypi.python.org/pypi/"
    proxy = None
    if args["--pypi-server"]:
        pypi_server = args["--pypi-server"]

    if args["--proxy"]:
        proxy = {'http': args["--proxy"], 'https': args["--proxy"]}

    if args["--use-local"]:
        logging.debug(
            "Getting package information ONLY from local installation.")
        imports = get_import_local(candidates, encoding=encoding)
    else:
        logging.debug("Getting packages information from Local/PyPI")
        local = get_import_local(candidates, encoding=encoding)
        # Get packages that were not found locally
        difference = [x for x in candidates
                      if x.lower() not in [z['name'].lower() for z in local]]
        imports = local + get_imports_info(difference,
                                           proxy=proxy,
                                           pypi_server=pypi_server)

    path = (args["--savepath"] if args["--savepath"] else
            os.path.join(input_path, "requirements.txt"))

    if args["--diff"]:
        diff(args["--diff"], imports)
        return

    if args["--clean"]:
        clean(args["--clean"], imports)
        return

    if (not args["--print"]
            and not args["--savepath"]
            and not args["--force"]
            and os.path.exists(path)):
        logging.warning("Requirements.txt already exists, "
                        "use --force to overwrite it")
        return

    if args.get('--no-pin'):
        imports = [{'name': item["name"], 'version': ''} for item in imports]

    if args["--print"]:
        output_requirements(imports)
        logging.info("Successfully output requirements")
    else:
        generate_requirements_file(path, imports)
        logging.info("Successfully saved requirements file in " + path)
Example #40
0
def get_all_imports(
        path, encoding=None, extra_ignore_dirs=None, follow_links=True):
    imports = set()
    raw_imports = set()
    candidates = []
    ignore_errors = False
    ignore_dirs = [".hg", ".svn", ".git", ".tox", "__pycache__", "env", "venv"]

    if extra_ignore_dirs:
        ignore_dirs_parsed = []
        for e in extra_ignore_dirs:
            ignore_dirs_parsed.append(os.path.basename(os.path.realpath(e)))
        ignore_dirs.extend(ignore_dirs_parsed)

    walk = os.walk(path, followlinks=follow_links)
    for root, dirs, files in walk:
        dirs[:] = [d for d in dirs if d not in ignore_dirs]

        candidates.append(os.path.basename(root))
        files = [fn for fn in files if os.path.splitext(fn)[1] == ".py"]

        candidates += [os.path.splitext(fn)[0] for fn in files]
        for file_name in files:
            file_name = os.path.join(root, file_name)
            with open_func(file_name, "r", encoding=encoding) as f:
                contents = f.read()
            try:
                tree = ast.parse(contents)
                for node in ast.walk(tree):
                    if isinstance(node, ast.Import):
                        for subnode in node.names:
                            raw_imports.add(subnode.name)
                    elif isinstance(node, ast.ImportFrom):
                        raw_imports.add(node.module)
            except Exception as exc:
                if ignore_errors:
                    traceback.print_exc()
                    logging.warning("Failed on file: %s" % file_name)
                    continue
                else:
                    logging.error("Failed on file: %s" % file_name)
                    raise exc

    # Clean up imports
    for name in [n for n in raw_imports if n]:
        # Sanity check: name could have been None if the import
        # statement was of the form ``from . import X``.
        # Cleanup: we only want the first part of the import.
        # Ex: ``from django.conf`` gives django.conf, but we only
        # want django as an import.
        cleaned_name, _, _ = name.partition('.')
        imports.add(cleaned_name)

    packages = imports - (set(candidates) & imports)
    logging.debug('Found packages: {0}'.format(packages))

    with open(join("stdlib"), "r") as f:
        data = {x.strip() for x in f}

    data = {x for x in data if x not in py2_exclude} if py2 else data
    return list(packages - data)
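
A short usage sketch for the function above; the module path in the import is an assumption (in pipreqs it lives in pipreqs.pipreqs):

import logging

from pipreqs.pipreqs import get_all_imports  # module path assumed

# Scan a project tree, skipping an extra build directory.
imports = get_all_imports('/path/to/project',
                          encoding='utf-8',
                          extra_ignore_dirs=['build'],
                          follow_links=False)
logging.debug("Third-party imports: %s", ", ".join(imports))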
Example #41
0
#! python3
# mapIt.py - Launches a map in the browser using an address from the
# command line or clipboard.
import logging
logging.disable(logging.CRITICAL)  # comment this line out to enable debug logging output
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s - %(levelname)s - %(message)s')
import webbrowser, sys, pyperclip
logging.debug('sys.argv variable is (%s)' % sys.argv)
if len(sys.argv) > 1:
    # Get address from command line
    address = ' '.join(sys.argv[1:])
    logging.debug('Address entered is (%s)' % address)
else:
    # Get address from clipboard
    address = pyperclip.paste()
    logging.debug('Address copied from clipboard is (%s)' % address)

webbrowser.open('https://www.google.com/maps/place/' + address)
Example #42
0
             if v not in d:
                 print('%s is not a key in map file %s' % (v, sMapFile))
             dMapper[k] = d[v] if v in d else dMapper[k]
     return dMapper,dMasterPtrn

def ArgParser():
    parser = argparse.ArgumentParser(description='Convert ctm alignment files to Praat TextGrid files', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('ctm_dir', help='The path to the directory containing ctm files', type=str)
    parser.add_argument('phones', help='The path to the phones file mapping ints to symbols', type=str)
    parser.add_argument('out_dir', help='The path to store TextGrid files', type=str)
    parser.add_argument('-l', '--lexicon', help='The lexicon file mapping words to phoneme sequences', dest='lexicon', type=str, default='')
    parser.add_argument('-m', '--phMap', help='Mapping phoneme symbols to other symbols (e.g. SAMPA to IPA)', dest='phMap', type=str, default='')
    return parser.parse_args()


if __name__ == '__main__':
    args = ArgParser()
    sCtmDir, sPhonesFile, sOutDir = args.ctm_dir, args.phones, args.out_dir
    sPhoneMapFile = args.phMap
    # Regular expression to remove the position suffix from the dictionary keys
    ptrn_pos = re.compile('_[BIES]')
    dMapper,rPtrn = prepare_mapper(((sPhonesFile,None),(sPhoneMapFile,ptrn_pos)))
    log.info("Mapper have been created from %s" % ' '.join((sPhonesFile,sPhoneMapFile)))
    lCtmFiles = glob.glob(join(sCtmDir,'*.ctm'))
    log.info("%d cmt files found in %s" % (len(lCtmFiles),sCtmDir))
    dAlign = defaultdict(list)
    for fCtm in lCtmFiles:
        log.debug("Now processing %s" % fCtm)
        loadCMT(fCtm,dAlign)
    Generate_TxtGrid(sOutDir, dAlign, dMapper=dMapper, cSpkrIdDil='-', sSuffix='', bWordTier=False)
Example #43
0
        frame = "fk5"

    if results.pos:
        ra, dec = results.pos
        pos = SkyCoord([ra] * u.degree, [dec] * u.degree, frame=frame)
        log.info(os.path.join(datadir, "Halpha_error.fits"))
        sm = SM(
            ha_file=os.path.join(datadir, "Halpha_map.fits"),
            err_file=os.path.join(datadir, "Halpha_error.fits"),
            nu=nu,
            log=log,
            d=d,
            v=v,
        )
        if results.halpha:
            val, err = sm.get_halpha(pos)  # call once and reuse the result
            logging.debug((val, err))
            print("Halpha: ", val, "(Rayleighs)")
            print("err_Halpha: ", err, "(Rayleighs)")
        if results.xi:
            val, err = sm.get_xi(pos)
            print("xi: ", val)
            print("err_xi: ", err)
        if results.sm:
            val, err = sm.get_sm(pos)
            print("sm: ", val, "kpc m^{-20/3}")
            print("err_sm: ", err, "kpc m^{-20/3}")
        if results.m:
            val, err = sm.get_m(pos)
            print("m: ", val * 100, "%")
            print("err_m: ", err * 100, "%")
Example #44
0
 def __init__(self):
     self.__dbpool = psycopg2.pool.ThreadedConnectionPool(self.__minconn, self.__maxconn, database=self.__database,
                                                          user=self.__dbuser,
                                                          password=self.__password,
                                                          host=self.__host, port=self.__port)
     logging.debug("Opened database Pool successfully")
Example #45
0
def parse_response(response, debug=False):
    data = response.json()

    if debug:
        logging.debug('REQUEST: %s' % response.request.url)
        logging.debug('REQUEST_HEADERS: %s' % response.request.headers)
        logging.debug('REQUEST_CONTENT: %s' % response.request.body)

        logging.debug('RESPONSE: %s' % response.content)
        logging.debug('RESP_HEADERS: %s' % response.headers)
        logging.debug('RESP_CODE: %s' % response.status_code)

    return data
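
A usage sketch with the requests library; the URL is illustrative:

import logging
import requests

logging.basicConfig(level=logging.DEBUG)

response = requests.get('https://api.example.com/items')  # hypothetical endpoint
data = parse_response(response, debug=True)  # logs request and response details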
Example #46
0
 def printing(self):
     print("inside the metaDB print function")
     for eachMetaRow in self.tagsList:
         logging.debug("%s, %s, %s, %s, %s ", eachMetaRow.timestamp, eachMetaRow.filename, eachMetaRow.startline, eachMetaRow.endline, eachMetaRow.keywords)
Example #47
0
def run_process(args):
    """Process qrc files."""

    import inspect
    import qrainbowstyle.palette as source

    palettes = []
    for name, obj in inspect.getmembers(source):
        if inspect.isclass(obj) and issubclass(
                obj, source.BasePalette) and obj is not source.BasePalette:
            palettes.append(obj)

    logging.debug("Found palettes: " + str(palettes))

    for palette in palettes:
        palette_name = str(palette.__name__)
        logging.debug("Generating files for: " + palette_name)

        # create directory for every style in palette.py
        os.chdir(STYLES_PATH)
        os.makedirs(palette_name, exist_ok=True)
        os.chdir(os.path.join(STYLES_PATH, palette_name))
        open("__init__.py", "w+").close()

        output_dir = os.path.join(STYLES_PATH, palette_name)

        # get paths to output directories for this palette
        images_dir = os.path.join(output_dir, 'images')
        rc_dir = os.path.join(output_dir, 'rc')
        # qss_dir = os.path.join(output_dir, 'qss')

        # create directories
        os.makedirs(images_dir, exist_ok=True)
        os.makedirs(rc_dir, exist_ok=True)
        # os.makedirs(qss_dir, exist_ok=True)

        qrc_filepath = os.path.join(output_dir, QRC_FILE)
        qss_filepath = os.path.join(output_dir, QSS_FILE)
        # variables_scss_filepath = os.path.join(qss_dir, VARIABLES_SCSS_FILE)

        # Create palette and resources png images
        logging.debug('Generating palette image ...')
        create_palette_image(palette=palette, path=images_dir)

        logging.debug('Generating images ...')
        create_images(palette=palette, rc_path=rc_dir)

        logging.debug("Generating images for titlebar buttons")
        create_titlebar_images(rc_path=rc_dir, palette=palette)

        logging.debug('Generating qrc ...')
        generate_qrc_file(rc_path=rc_dir, qrc_path=qrc_filepath)

        logging.debug('Converting .qrc to _rc.py and/or .rcc ...')

        for qrc_file in glob.glob('*.qrc'):
            # get name without extension
            filename = os.path.splitext(qrc_file)[0]

            logging.debug(filename + '...')
            ext = '_rc.py'
            ext_c = '.rcc'

            # Create variables SCSS files and compile SCSS files to QSS
            logging.debug('Compiling SCSS/SASS files to QSS ...')
            create_qss(palette=palette, qss_filepath=qss_filepath)

            # creating names
            py_file_pyqt5 = 'pyqt5_' + filename + ext
            py_file_pyqt = 'pyqt_' + filename + ext
            py_file_pyside = 'pyside_' + filename + ext
            py_file_pyside2 = 'pyside2_' + filename + ext
            py_file_qtpy = '' + filename + ext
            py_file_pyqtgraph = 'pyqtgraph_' + filename + ext

            # append palette used to generate this file
            used_palette = "\nfrom qrainbowstyle.palette import " + palette.__name__ + "\npalette = " + palette.__name__ + "\n"

            # calling external commands
            if args.create in ['pyqt', 'pyqtgraph', 'all']:
                logging.debug("Compiling for PyQt4 ...")
                try:
                    call(['pyrcc4', '-py3', qrc_file, '-o', py_file_pyqt])
                    with open(py_file_pyqt, "a+") as f:
                        f.write(used_palette)

                except FileNotFoundError:
                    logging.debug("You must install pyrcc4")

            if args.create in ['pyqt5', 'qtpy', 'all']:
                logging.debug("Compiling for PyQt5 ...")
                try:
                    call(['pyrcc5', qrc_file, '-o', py_file_pyqt5])
                    with open(py_file_pyqt5, "a+") as f:
                        f.write(used_palette)
                except FileNotFoundError:
                    logging.debug("You must install pyrcc5")

            if args.create in ['pyside', 'all']:
                logging.debug("Compiling for PySide ...")
                try:
                    call(
                        ['pyside-rcc', '-py3', qrc_file, '-o', py_file_pyside])
                    with open(py_file_pyside, "a+") as f:
                        f.write(used_palette)
                except FileNotFoundError:
                    logging.debug("You must install pyside-rcc")

            if args.create in ['pyside2', 'all']:
                logging.debug("Compiling for PySide 2...")
                try:
                    call([
                        'pyside2-rcc', '-py3', qrc_file, '-o', py_file_pyside2
                    ])
                    with open(py_file_pyside2, "a+") as f:
                        f.write(used_palette)
                except FileNotFoundError:
                    logging.debug("You must install pyside2-rcc")

            if args.create in ['qtpy', 'all']:
                logging.debug("Compiling for QtPy ...")
                # special case - qtpy - syntax is PyQt5
                with open(py_file_pyqt5, 'r') as file:
                    filedata = file.read()

                # replace the target string
                filedata = filedata.replace('from PyQt5', 'from qtpy')

                with open(py_file_qtpy, 'w+') as file:
                    # write the file out again
                    file.write(filedata)

                if args.create not in ['pyqt5']:
                    os.remove(py_file_pyqt5)

            if args.create in ['pyqtgraph', 'all']:
                logging.debug("Compiling for PyQtGraph ...")
                # special case - pyqtgraph - syntax is PyQt4
                with open(py_file_pyqt, 'r') as file:
                    filedata = file.read()

                # replace the target string
                filedata = filedata.replace('from PyQt4', 'from pyqtgraph.Qt')

                with open(py_file_pyqtgraph, 'w+') as file:
                    # write the file out again
                    file.write(filedata)
Example #48
0
    def createFile(self, subtitle):
        '''Pass the URL of the sub and the file it matches; will unzip it
        and return the path to the created file.'''
        subpage = subtitle["page"]
        page = urllib2.urlopen(subpage)
        soup = BeautifulSoup(page)

        dlhref = soup.find("div", {"class": "download"}).find("a")["href"]
        subtitle["link"] = "http://subscene.com" + dlhref.split('"')[7]
        format = "zip"
        archivefilename = subtitle["filename"].rsplit(".", 1)[0] + '.' + format
        self.downloadFile(subtitle["link"], archivefilename)
        subtitlefilename = None

        if zipfile.is_zipfile(archivefilename):
            logging.debug("Unzipping file " + archivefilename)
            zf = zipfile.ZipFile(archivefilename, "r")
            for el in zf.infolist():
                extension = el.orig_filename.rsplit(".", 1)[1]
                if extension in ("srt", "sub", "txt"):
                    subtitlefilename = srtbasefilename + "." + extension
                    outfile = open(subtitlefilename, "wb")
                    outfile.write(zf.read(el.orig_filename))
                    outfile.flush()
                    outfile.close()
                else:
                    logging.info("File %s does not seem to be valid " %
                                 el.orig_filename)
            # Deleting the zip file
            zf.close()
            os.remove(archivefilename)
            return subtitlefilename
        elif archivefilename.endswith('.rar'):
            logging.warning(
                'Rar is not really supported yet. Trying to call unrar')
            import subprocess
            try:
                args = ['unrar', 'lb', archivefilename]
                output = subprocess.Popen(
                    args, stdout=subprocess.PIPE).communicate()[0]
                for el in output.splitlines():
                    extension = el.rsplit(".", 1)[1]
                    if extension in ("srt", "sub"):
                        args = [
                            'unrar', 'e', archivefilename, el,
                            os.path.dirname(archivefilename)
                        ]
                        subprocess.Popen(args)
                        tmpsubtitlefilename = os.path.join(
                            os.path.dirname(archivefilename), el)
                        subtitlefilename = os.path.join(
                            os.path.dirname(archivefilename),
                            srtbasefilename + "." + extension)
                        if os.path.exists(tmpsubtitlefilename):
                            # rename it to match the file
                            os.rename(tmpsubtitlefilename, subtitlefilename)
                            # exit
                        return subtitlefilename
            except OSError as e:
                logging.error("Execution failed: %s" % e)
                return None
Example #49
0
    def collectWMBSInfo(self):
        """
        Fetches WMBS job information.
        In addition to WMBS, also collects RunJob info from BossAir
        :return: dict with the number of jobs in each status
        """
        logging.info("Getting wmbs job info ...")
        results = {}
        
        start = int(time.time())
        # first retrieve the site thresholds
        results['thresholds'] = self.wmagentDB.getJobSlotInfo()
        logging.debug("Running and pending site thresholds: %s", results['thresholds'])

        # now fetch the amount of jobs in each state and the amount of created
        # jobs grouped by task
        results.update(self.wmagentDB.getAgentMonitoring())
        end = int(time.time())
        # adding total query time
        results["total_query_time"] = end - start
        
        logging.debug("Total number of jobs in WMBS sorted by status: %s", results['wmbsCountByState'])
        logging.debug("Total number of 'created' jobs in WMBS sorted by type: %s", results['wmbsCreatedTypeCount'])
        logging.debug("Total number of 'executing' jobs in WMBS sorted by type: %s", results['wmbsExecutingTypeCount'])

        logging.debug("Total number of active jobs in BossAir sorted by status: %s", results['activeRunJobByStatus'])
        logging.debug("Total number of complete jobs in BossAir sorted by status: %s", results['completeRunJobByStatus'])

        logging.debug("Available slots thresholds to pull work from GQ to LQ: %s", results['thresholdsGQ2LQ'])
        logging.debug("List of jobs pending for each site, sorted by priority: %s", results['sitePendCountByPrio'])

        return results
Example #50
0
 def on_modified(self, event):
     """Handle file system events."""
     if event.src_path.endswith('.qss'):
         run_process(self.args)
         logging.debug('\n')
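
This handler fragment presumably belongs to a watchdog FileSystemEventHandler; a minimal sketch of how such a handler is typically wired up. The class name and constructor are assumptions:

import logging
import time

from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer

class QssHandler(FileSystemEventHandler):  # hypothetical name
    def __init__(self, args):
        self.args = args

    def on_modified(self, event):
        if event.src_path.endswith('.qss'):
            logging.debug('Stylesheet changed: %s', event.src_path)
            # run_process(self.args) would regenerate resources here

observer = Observer()
observer.schedule(QssHandler(args=None), path='.', recursive=True)
observer.start()
try:
    while True:
        time.sleep(1)
finally:
    observer.stop()
    observer.join()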
Example #51
0
def setStatusFile(api:to_api.API) -> bool:
	"""
	Attempts to set the status file according to this server's reported status in Traffic Ops.

	.. warning:: This will create the directory '/opt/ORTstatus' if it does not exist, and may
		delete files there without warning!

	:param api: A :class:`traffic_ops_ort.to_api.API` object to use when interacting with Traffic Ops
	:returns: whether or not the status file could be set properly
	"""
	global STATUS_FILE_DIR
	from .configuration import MODE, Modes
	from . import utils
	logging.info("Setting status file")

	if not isinstance(MODE, Modes):
		logging.error("MODE is not set to a valid Mode (from traffic_ops_ort.configuration.Modes)!")
		return False

	try:
		myStatus = api.getMyStatus()
	except ConnectionError as e:
		logging.error("Failed to set status file - Traffic Ops connection failed")
		return False

	if not os.path.isdir(STATUS_FILE_DIR):
		logging.warning("status directory does not exist, creating...")
		doMakeDir = MODE is not Modes.REPORT

		# Check for user confirmation if in 'INTERACTIVE' mode
		if doMakeDir and (MODE is not Modes.INTERACTIVE or\
		   utils.getYesNoResponse("Create status directory '%s'?" % STATUS_FILE_DIR, default='Y')):
			try:
				os.makedirs(STATUS_FILE_DIR)  # fall through to create the status file below
			except OSError as e:
				logging.error("Failed to create status directory '%s' - %s", STATUS_FILE_DIR, e)
				logging.debug("%s", e, exc_info=True, stack_info=True)
				return False
	else:
		try:
			deleteOldStatusFiles(myStatus, api)
		except ConnectionError as e:
			logging.error("Failed to delete old status files - Traffic Ops connection failed.")
			logging.debug("%s", e, exc_info=True, stack_info=True)
			return False
		except OSError as e:
			logging.error("Failed to delete old status files - %s", e)
			logging.debug("%s", e, exc_info=True, stack_info=True)
			return False

	fname = os.path.join(STATUS_FILE_DIR, myStatus)
	if not os.path.isfile(fname):
		logging.info("File '%s' to be created", fname)
		if MODE is not Modes.REPORT and\
		  (MODE is not Modes.INTERACTIVE or utils.getYesNoResponse("Create file '%s'?" % fname, default='y')):

			try:
				with open(fname, 'x'):
					pass
			except OSError as e:
				logging.error("Failed to create status file - %s", e)
				logging.debug("%s", e, exc_info=True, stack_info=True)
				return False

	return True
Example #52
0
def run() -> int:
	"""
	This function is the entrypoint into the script's main flow from :func:`traffic_ops_ort.doMain`
	It runs the appropriate actions depending on the run mode

	:returns: an exit code for the script
	"""
	from . import configuration, utils, services

	try:
		api = to_api.API(configuration.USERNAME, configuration.PASSWORD, configuration.TO_HOST,
		                 configuration.HOSTNAME[0], configuration.TO_PORT, configuration.VERIFY,
		                 configuration.TO_USE_SSL)
	except (LoginError, OperationError) as e:
		logging.critical("Failed to authenticate with Traffic Ops")
		logging.error(e)
		logging.debug("%r", e, exc_info=True, stack_info=True)
		return 1

	# If this is just a revalidation, then we can exit if there's no revalidation pending
	if configuration.MODE == configuration.Modes.REVALIDATE:
		try:
			updateRequired = revalidateState(api)
		except ORTException as e:
			logging.debug("%r", e, exc_info=True, stack_info=True)
			return 2

		if not updateRequired:
			logging.info("No revalidation pending")
			return 0

		logging.info("in REVALIDATE mode; skipping package/service processing")

	# In all other cases, we check for an update to the Delivery Service and apply any found
	# changes
	else:
		try:
			updateRequired = syncDSState(api)
		except ORTException as e:
			logging.debug("%r", e, exc_info=True, stack_info=True)
			return 2

		# Bail on failures - unless this script is BADASS!
		if not setStatusFile(api):
			if configuration.MODE is not configuration.Modes.BADASS:
				logging.critical("Failed to set status as specified by Traffic Ops")
				return 2
			logging.warning("Failed to set status but we're BADASS, so moving on.")

		logging.info("\nProcessing Packages...")
		if not processPackages(api):
			logging.critical("Failed to process packages")
			if configuration.MODE is not configuration.Modes.BADASS:
				return 2
			logging.warning("Package processing failed but we're BADASS, so attempting to move on")
		logging.info("Done.\n")

		logging.info("\nProcessing Services...")
		if not processServices(api):
			logging.critical("Failed to process services.")
			if configuration.MODE is not configuration.Modes.BADASS:
				return 2
			logging.warning("Service processing failed but we're BADASS, so attempting to move on")
		logging.info("Done.\n")


	# All modes process configuration files
	logging.info("\nProcessing Configuration Files...")
	if not processConfigurationFiles(api):
		logging.critical("Failed to process configuration files.")
		return 2
	logging.info("Done.\n")

	if updateRequired:
		if configuration.MODE is not configuration.Modes.INTERACTIVE or\
		   utils.getYesNoResponse("Update Traffic Ops?", default='Y'):

			logging.info("\nUpdating Traffic Ops...")
			api.updateTrafficOps()
			logging.info("Done.\n")
		else:
			logging.warning("Traffic Ops was not notified of changes. You should do this manually.")

		return 0

	logging.info("Traffic Ops update not necessary")

	if services.NEEDED_RELOADS and not services.doReloads():
		logging.critical("Failed to reload all configuration changes")
		return 2

	return 0
Example #53
0
def ground(problem, plagent, exp_signs=None):
    domain = problem.domain
    actions = domain.actions.values()
    predicates = domain.predicates.values()
    constraints = problem.constraints

    # Objects
    objects = problem.objects
    objects.update(domain.constants)
    logging.debug('Objects:\n%s' % objects)

    # Remove old type_map
    if exp_signs:
        objects = _update_exp_signs(exp_signs, objects)

    # Create a map from types to objects
    type_map = _create_type_map(objects)
    logging.debug("Type to object map:\n%s" % type_map)

    # Create type subtype map
    subtype_map = _create_subtype(domain.types)

    obj_signifs = {}
    # add agents, because their significances carry an inner link to the I and They signs
    obj_means = {}

    # Check logic in exp
    if exp_signs:
        signs = exp_signs
        I_sign = signs['I']
        They_sign = signs['They']
        obj_means[I_sign] = I_sign.meanings[1]
        obj_signifs[I_sign] = I_sign.significances[1]
        obj_means[They_sign] = They_sign.meanings[1]
        obj_signifs[They_sign] = They_sign.significances[1]
        signs['situation'] = exp_signs['situation']
    else:
        signs = {}
        I_sign = Sign("I")
        obj_means[I_sign] = I_sign.add_meaning()
        obj_signifs[I_sign] = I_sign.add_significance()
        signs[I_sign.name] = I_sign
        They_sign = Sign("They")
        obj_means[They_sign] = They_sign.add_meaning()
        obj_signifs[They_sign] = They_sign.add_significance()
        signs[They_sign.name] = They_sign
        signs['situation'] = Sign('situation')

    for obj in objects:
        obj_sign = Sign(obj)
        obj_signifs[obj_sign] = obj_sign.add_significance()
        signs[obj] = obj_sign
        if obj_sign.name == plagent:
            connector = obj_signifs[obj_sign].add_feature(obj_signifs[I_sign], zero_out=True)
            I_sign.add_out_significance(connector)
            obj_means[obj_sign] = obj_sign.add_meaning()

    for tp, objects in type_map.items():
        if exp_signs:
            tp_sign = signs[tp.name]
        else:
            tp_sign = Sign(tp.name)
        for obj in objects:
            obj_signif = obj_signifs[signs[obj]]
            tp_signif = tp_sign.add_significance()
            connector = tp_signif.add_feature(obj_signif, zero_out=True)
            signs[obj].add_out_significance(connector)
        if not exp_signs:
            signs[tp.name] = tp_sign

    others = set()
    for id in range(1, len(signs['agent'].significances)+1):
        other_ag = signs['agent'].significances[id].get_signs()
        if signs[plagent] not in other_ag:
            others |= other_ag

    for subagent in others:
        if subagent.name != plagent:
            if They_sign not in subagent.significances[1].get_signs():
                signif = obj_signifs[They_sign]
                if signif.is_empty():
                    They_signif = signif
                else:
                    They_signif = They_sign.add_significance()
                connector = subagent.significances[1].add_feature(They_signif, zero_out=True)
                They_sign.add_out_significance(connector)
                obj_means[subagent] = subagent.add_meaning()

    if not exp_signs:
        updated_predicates = _update_predicates(predicates, actions)
        signify_predicates(predicates, updated_predicates, signs, subtype_map, domain.constants)
        signify_actions(actions, constraints, signs, plagent, obj_means, obj_signifs)
        signify_connection(signs)

    start_situation, pms = _define_situation('*start*', problem.initial_state, signs, 'image')
    goal_situation, pms = _define_situation('*finish*', problem.goal, signs, 'image')
    # if problem.name.startswith("blocks"):
    #     list_signs = task_signs(problem)
    #     _expand_situation_ma_blocks(goal_situation, signs, pms, list_signs)  # For task
    return MaPlanningTask(problem.name, signs, start_situation, goal_situation)
Example #54
0
def start_logger():
	logging.debug("message 1 message 2")
Example #55
0
    def combine(self):
        """
        Function for combining multiple coco datasets
        """

        self.resim_dir = os.path.join(self.res_dir, 'combination', 'images')
        self.resann_dir = os.path.join(self.res_dir, 'combination', 'annotations')

        # Create directories for combination results and clear the previous
        # ones (exist_ok handles the shared 'combination' parent folder)
        if os.path.exists(self.resim_dir):
            shutil.rmtree(self.resim_dir)
        os.makedirs(self.resim_dir, exist_ok=True)
        if os.path.exists(self.resann_dir):
            shutil.rmtree(self.resann_dir)
        os.makedirs(self.resann_dir, exist_ok=True)

        # Combine images
        print("Merging image dirs")
        im_dirs = [os.path.join(self.img_dir, folder) for folder in self.imgfolders]
        imext = [".png", ".jpg"]

        logging.debug("Combining Images...")

        for imdir in tqdm(im_dirs):
            ims = [i for i in os.listdir(imdir) if i[-4:].lower() in imext]
            for im in ims:
                shutil.copyfile(os.path.join(imdir, im), os.path.join(self.resim_dir, im))

        # Combine annotations
        cann = {'images': [],
                'annotations': [],
                'info': None,
                'licenses': None,
                'categories': None}

        logging.debug("Combining Annotations...")

        dst_ann = os.path.join(self.resann_dir, 'combined.json')

        print("Merging annotations")
        for j in tqdm(self.jsonfiles):
            with open(os.path.join(self.ann_dir, j)) as a:
                c = json.load(a)

            ind = self.jsonfiles.index(j)
            cocofile = self.annfiles[ind]
            # If this is the 1st annotation file, copy it in as-is;
            # otherwise remap its ids before merging
            if ind == 0:
                cann['images'] = cann['images'] + c['images']
                cann['annotations'] = cann['annotations'] + c['annotations']
                if 'info' in list(c.keys()):
                    cann['info'] = c['info']
                if 'licenses' in list(c.keys()):
                    cann['licenses'] = c['licenses']
                cann['categories'] = c['categories']

                last_imid = cann['images'][-1]['id']
                last_annid = cann['annotations'][-1]['id']

                logging.debug("String Ids detected. Converting to int")

                # If last imid or last_annid is a str, convert it to int
                if isinstance(last_imid, str):
                    id_dict = {}
                    # Change image id in images field
                    for i, im in enumerate(cann['images']):
                        id_dict[im['id']] = i
                        im['id'] = i

                if isinstance(last_annid, str):
                    # Change annotation id & image id in annotations field
                    for i, im in enumerate(cann['annotations']):
                        im['id'] = i
                        if isinstance(last_imid, str):
                            im['image_id'] = id_dict[im['image_id']]

                last_imid = cann['images'][-1]['id']
                last_annid = cann['annotations'][-1]['id']

            else:
                new_imids = [(last_imid + i + 1) for i in sorted(list(cocofile.imgs.keys()))]
                new_annids = [(last_annid + i + 1) for i in sorted(list(cocofile.anns.keys()))]

                def modify_ids(jf, imids, annids):
                    id_dict = {}
                    for img, newimid in zip(jf['images'], imids):
                        id_dict[img['id']] = newimid
                        img['id'] = newimid
                    for ann, newannid in zip(jf['annotations'], annids):
                        ann['id'] = newannid
                        ann['image_id'] = id_dict[ann['image_id']]
                    return jf

                c = modify_ids(c, new_imids, new_annids)
                cann['images'] = cann['images'] + c['images']
                cann['annotations'] = cann['annotations'] + c['annotations']
                if 'info' in list(c.keys()):
                    cann['info'] = c['info']
                if 'licenses' in list(c.keys()):
                    cann['licenses'] = c['licenses']
                cann['categories'] = c['categories']

                last_imid = cann['images'][-1]['id']
                last_annid = cann['annotations'][-1]['id']

        with open(dst_ann, 'w') as aw:
            json.dump(cann, aw)
Example #56
0
 def watch_cb(name):
     if name == '':
         logging.debug('%s went offline' % sender)
         self.devices.remove(wrap)
         wrap.remove_from_connection()
         watcher.cancel()
Example #57
0
def run(test, params, env):
    """
    Test qmp event notification, this case will:
    1) Start VM with qmp enable.
    2) Connect to qmp port then run qmp_capabilities command.
    3) Initiate the qmp command defined in config (qmp_cmd)
    4) Verify that qmp command works as designed.

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environmen.
    """
    def check_result(qmp_o, output=None, exception_list=""):
        """
        Check test result with difference way accoriding to
        result_check.
        result_check = equal, will compare cmd_return_value with qmp
                       command output.
        result_check = contain, will try to find cmd_return_value in qmp
                       command output.
        result_check = m_equal_q, will compare key value in monitor command
                       output and qmp command output.
        result_check = m_in_q, will try to find monitor command output's key
                       value in qmp command output.
        result_check = m_format_q, will try to match the output's format with
                       check pattern.

        :param qmp_o: output from pre_cmd, qmp_cmd or post_cmd.
        :param o: output from pre_cmd, qmp_cmd or post_cmd or an execpt
        :param exception_list: element no need check.
        result set in config file.
        """
        if result_check == "equal":
            value = output
            if value != str(qmp_o):
                raise exceptions.TestFail("QMP command return value does not match "
                                          "the expect result. Expect result: '%s'\n"
                                          "Actual result: '%s'" % (value, qmp_o))
        elif result_check == "contain":
            values = output.split(';')
            for value in values:
                if value in exception_list:
                    continue
                if value.strip() not in str(qmp_o):
                    raise exceptions.TestFail("QMP command output does not contain "
                                              "expect result. Expect result: '%s'\n"
                                              "Actual result: '%s'"
                                              % (value, qmp_o))
        elif result_check == "not_contain":
            values = output.split(';')
            for value in values:
                if value in exception_list:
                    continue
                if value in str(qmp_o):
                    raise exceptions.TestFail("QMP command output contains unexpect"
                                              " result. Unexpect result: '%s'\n"
                                              "Actual result: '%s'"
                                              % (value, qmp_o))
        elif result_check == "m_equal_q":
            msg = "QMP command ouput is not equal to in human monitor command."
            msg += "\nQMP command output: '%s'" % qmp_o
            msg += "\nHuman command output: '%s'" % output
            res = output.splitlines(True)
            if type(qmp_o) != type(res):
                len_o = 1
            else:
                len_o = len(qmp_o)
            if len(res) != len_o:
                if res[0].startswith(' '):
                    raise exceptions.TestFail("Human command starts with ' ', "
                                              "there is probably some garbage in "
                                              "the output.\n" + msg)
                res_tmp = []
                # (qemu) info block in RHEL7 is divided into 3 lines
                for line in res:
                    if not line.startswith(' '):
                        res_tmp.append(line)
                    else:
                        res_tmp[-1] += line
                res = res_tmp
                if len(res) != len_o:
                    raise exceptions.TestFail(msg)
            re_str = r'([^ \t\n\r\f\v=]*)=([^ \t\n\r\f\v=]*)'
            for i in range(len(res)):
                if qmp_cmd == "query-version":
                    version = qmp_o['qemu']
                    version = "%s.%s.%s" % (version['major'], version['minor'],
                                            version['micro'])
                    package = qmp_o['package']
                    re_str = r"([0-9]+\.[0-9]+\.[0-9]+)\s*(\(\S*\))?"
                    hmp_version, hmp_package = re.findall(re_str, res[i])[0]
                    if not hmp_package:
                        hmp_package = package
                    hmp_package = hmp_package.strip()
                    package = package.strip()
                    hmp_version = hmp_version.strip()
                    if version != hmp_version or package != hmp_package:
                        raise exceptions.TestFail(msg)
                else:
                    matches = re.findall(re_str, res[i])
                    for key, val in matches:
                        if key in exception_list:
                            continue
                        if '0x' in val:
                            val = int(val, 16)
                            val_str = str(bin(val))
                            com_str = ""
                            for p in range(3, len(val_str)):
                                if val_str[p] == '1':
                                    com_str += '0'
                                else:
                                    com_str += '1'
                            com_str = "0b" + com_str
                            value = eval(com_str) + 1
                            if val_str[2] == '1':
                                value = -value
                            if value != qmp_o[i][key]:
                                msg += "\nValue in human monitor: '%s'" % value
                                msg += "\nValue in qmp: '%s'" % qmp_o[i][key]
                                raise exceptions.TestFail(msg)
                        elif qmp_cmd == "query-block":
                            cmp_str = "u'%s': u'%s'" % (key, val)
                            cmp_s = "u'%s': %s" % (key, val)
                            if '0' == val:
                                cmp_str_b = "u'%s': False" % key
                            elif '1' == val:
                                cmp_str_b = "u'%s': True" % key
                            else:
                                cmp_str_b = cmp_str
                            if (cmp_str not in str(qmp_o[i]) and
                                    cmp_str_b not in str(qmp_o[i]) and
                                    cmp_s not in str(qmp_o[i])):
                                msg += ("\nCan not find '%s', '%s' or '%s' in "
                                        " QMP command output."
                                        % (cmp_s, cmp_str_b, cmp_str))
                                raise exceptions.TestFail(msg)
                        elif qmp_cmd == "query-balloon":
                            if (int(val) * 1024 * 1024 != qmp_o[key] and
                                    val not in str(qmp_o[key])):
                                msg += ("\n'%s' is not in QMP command output"
                                        % val)
                                raise exceptions.TestFail(msg)
                        else:
                            if (val not in str(qmp_o[i][key]) and
                                    str(bool(int(val))) not in str(qmp_o[i][key])):
                                msg += ("\n'%s' is not in QMP command output"
                                        % val)
                                raise exceptions.TestFail(msg)
        elif result_check == "m_in_q":
            res = output.splitlines(True)
            msg = "Key value from human monitor command is not in"
            msg += "QMP command output.\nQMP command output: '%s'" % qmp_o
            msg += "\nHuman monitor command output '%s'" % output
            for i in range(len(res)):
                params = res[i].rstrip().split()
                for param in params:
                    if param.rstrip() in exception_list:
                        continue
                    try:
                        str_o = str(qmp_o.values())
                    except AttributeError:
                        str_o = qmp_o
                    if param.rstrip() not in str(str_o):
                        msg += "\nKey value is '%s'" % param.rstrip()
                        raise exceptions.TestFail(msg)
        elif result_check == "m_format_q":
            match_flag = True
            for i in qmp_o:
                if output is None:
                    raise exceptions.TestError("QMP output pattern is missing")
                if re.match(output.strip(), str(i)) is None:
                    match_flag = False
            if not match_flag:
                msg = "Output does not match the pattern: '%s'" % output
                raise exceptions.TestFail(msg)

    qemu_binary = utils_misc.get_qemu_binary(params)
    if not utils_misc.qemu_has_option("qmp", qemu_binary):
        raise exceptions.TestSkipError("Host qemu does not support qmp.")

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360)))

    module = params.get("modprobe_module")
    if module:
        logging.info("modprobe the module %s", module)
        session.cmd("modprobe %s" % module)

    qmp_ports = vm.get_monitors_by_type('qmp')
    if qmp_ports:
        qmp_port = qmp_ports[0]
    else:
        raise exceptions.TestError("Incorrect configuration, no QMP monitor found.")
    hmp_ports = vm.get_monitors_by_type('human')
    if hmp_ports:
        hmp_port = hmp_ports[0]
    else:
        raise exceptions.TestError("Incorrect configuration, no QMP monitor found.")
    callback = {"host_cmd": lambda cmd: process.system_output(cmd, shell=True),
                "guest_cmd": session.get_command_output,
                "monitor_cmd": hmp_port.send_args_cmd,
                "qmp_cmd": qmp_port.send_args_cmd}

    def send_cmd(cmd):
        """ Helper to execute command on ssh/host/monitor """
        if cmd_type in callback:
            return callback[cmd_type](cmd)
        else:
            raise exceptions.TestError("cmd_type is not supported")

    pre_cmd = params.get("pre_cmd")
    qmp_cmd = params.get("qmp_cmd")
    cmd_type = params.get("event_cmd_type")
    post_cmd = params.get("post_cmd")
    result_check = params.get("cmd_result_check")
    cmd_return_value = params.get("cmd_return_value")
    exception_list = params.get("exception_list", "")

    # Pre command
    if pre_cmd is not None:
        logging.info("Run prepare command '%s'.", pre_cmd)
        pre_o = send_cmd(pre_cmd)
        logging.debug("Pre-command: '%s'\n Output: '%s'", pre_cmd, pre_o)
    try:
        # Testing command
        logging.info("Run qmp command '%s'.", qmp_cmd)
        output = qmp_port.send_args_cmd(qmp_cmd)
        logging.debug("QMP command: '%s' \n Output: '%s'", qmp_cmd, output)
    except qemu_monitor.QMPCmdError as err:
        if params.get("negative_test") == 'yes':
            logging.debug("Negative QMP command: '%s'\n output:'%s'", qmp_cmd,
                          err)
            if params.get("negative_check_pattern"):
                check_pattern = params.get("negative_check_pattern")
                if check_pattern not in str(err):
                    raise exceptions.TestFail("'%s' not in exception '%s'"
                                              % (check_pattern, err))
        else:
            raise exceptions.TestFail(err)
Example #58
0
                check_pattern = params.get("negative_check_pattern")
                if check_pattern not in str(err):
                    raise exceptions.TestFail("'%s' not in exception '%s'"
                                              % (check_pattern, err))
        else:
            raise exceptions.TestFail(err)
    except qemu_monitor.MonitorProtocolError as err:
        raise exceptions.TestFail(err)
    except Exception as err:
        raise exceptions.TestFail(err)

    # Post command
    if post_cmd is not None:
        logging.info("Run post command '%s'.", post_cmd)
        post_o = send_cmd(post_cmd)
        logging.debug("Post-command: '%s'\n Output: '%s'", post_cmd, post_o)

    if result_check is not None:
        txt = "Verify that qmp command '%s' works as designed." % qmp_cmd
        logging.info(txt)
        if result_check == "equal" or result_check == "contain":
            if qmp_cmd == "query-name":
                vm_name = params["main_vm"]
                check_result(output, vm_name, exception_list)
            elif qmp_cmd == "query-uuid":
                uuid_input = params["uuid"]
                check_result(output, uuid_input, exception_list)
            else:
                check_result(output, cmd_return_value, exception_list)
        elif result_check == "m_format_q":
            check_result(output, cmd_return_value, exception_list)
Example #59
0
def create(username, project, zone, machine_type, accelerator_count,
           accelerator_type, image, nvme_count, ssh_internal_ip, ssh_key_file,
           cpu_min_platform=None, boot_ssd_size=None):
  """Create gcloud computing instance.

  Args:
    username: the username of the current user
    project: project name
    zone: zone of the GCP computing instance
    machine_type: the machine type used for the instance
    accelerator_count: the number of pieces of the accelerator to attach to
                       the instance
    accelerator_type: the specific type of accelerator to attach to the instance
    image: the name of the image that the disk will be initialized with
    nvme_count: the number of NVME local SSD devices to attach to the instance
    ssh_internal_ip: internal ip to use for ssh.
    ssh_key_file: ssh key file to use to connect to instance.
    cpu_min_platform: minimum CPU platform to use, if None use default.
    boot_ssd_size: If set boot disk is changed to SSD and this size(GB) is used.
  """
  instance_name = get_instance_name(username)
  machine_type = get_machine_type(machine_type, accelerator_count)
  logging.debug('Creating gcloud computing instance %s', instance_name)

  cmd = '''gcloud compute instances create {} \
--image={} \
--project={} \
--zone={} \
--machine-type={} \
--maintenance-policy=TERMINATE \
'''.format(instance_name, image, project, zone, machine_type)

  if boot_ssd_size:
    cmd += '--boot-disk-size={}GB --boot-disk-type=pd-ssd '.format(
        boot_ssd_size)

  if accelerator_count > 0:
    cmd += '--accelerator=count={},type={} '.format(
        accelerator_count, accelerator_type)

  if cpu_min_platform:
    cmd += '--min-cpu-platform="{}" '.format(cpu_min_platform)

  for _ in range(nvme_count):
    cmd += '--local-ssd=interface=NVME '

  run_command(cmd, is_from_user=True)
  logging.info('Successfully created gcloud computing instance %s '
               'with %s accelerator(s).\n', instance_name, accelerator_count)

  ssh_prefix = _ssh_prefix(project, zone, ssh_internal_ip, ssh_key_file)
  # Wait until we can ssh to the newly created computing instance
  cmd = '{} --strict-host-key-checking=no --command="exit" {}'.format(
      ssh_prefix, instance_name)
  ssh_remaining_retries = 12
  ssh_error = None
  while ssh_remaining_retries > 0:
    ssh_remaining_retries -= 1
    try:
      run_command(cmd, is_from_user=False)
      ssh_error = None
      break  # ssh succeeded, stop retrying
    except Exception as error:  # pylint: disable=broad-except
      ssh_error = error
      if ssh_remaining_retries:
        logging.info('Cannot ssh to the computing instance. '
                     'Try again after 5 seconds')
        time.sleep(5)
      else:
        logging.error('Cannot ssh to the computing instance after '
                      '60 seconds due to error:\n%s', str(ssh_error))

  if ssh_error:
    logging.info('Run the commands below manually after ssh into the computing '
                 'instance:\n'
                 'git clone https://github.com/tensorflow/benchmarks.git\n'
                 'sudo usermod -a -G docker $USER\n')
  else:
    cmd = '{} --command="git clone {}" {}'.format(
        ssh_prefix, 'https://github.com/tensorflow/benchmarks.git',
        instance_name)
    run_command(cmd, is_from_user=True)
    logging.info('Successfully checked-out PerfZero code on the '
                 'computing instance\n')

    cmd = '{} --command="sudo usermod -a -G docker $USER" {}'.format(
        ssh_prefix, instance_name)
    run_command(cmd, is_from_user=True)
    logging.info('Successfully added user to the docker group\n')

  cmd = '{} {} -- -L 6006:127.0.0.1:6006'.format(ssh_prefix, instance_name)
  logging.info('Run the command below to ssh to the instance together with '
               'port forwarding for tensorboard:\n%s\n', cmd)
Example #60
0
 def proxy_call(self, cb):
     if self.suspended:
         logging.debug('The service is suspended, delay the call')
         self.callbacks += [cb]
     else:
         cb()
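
A self-contained sketch of the suspend/queue/flush pattern this method implements; the class shape and the resume method are assumptions:

import logging

class SuspendableService:  # hypothetical container for proxy_call
    def __init__(self):
        self.suspended = False
        self.callbacks = []

    def proxy_call(self, cb):
        if self.suspended:
            logging.debug('The service is suspended, delay the call')
            self.callbacks.append(cb)
        else:
            cb()

    def resume(self):
        # Flush every callback queued while the service was suspended.
        self.suspended = False
        while self.callbacks:
            self.callbacks.pop(0)()

svc = SuspendableService()
svc.suspended = True
svc.proxy_call(lambda: print('deferred call ran'))
svc.resume()  # runs the queued callback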