Example #1
def fatal(message):
  tracemessage = ''
  lineno = __getLineNumber()
  if lineno is not None:
    tracemessage = '[' + __currFile + ':' + str(lineno) + '] '
  logging.fatal(tracemessage + message)
  cleanUpAndExit(-1)
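
A point worth noting about this wrapper (and several of the examples below): in the standard library, logging.fatal() logs at the same severity as logging.critical() but does not terminate the process, which is why the wrapper calls cleanUpAndExit() itself. A minimal standalone sketch of that behaviour:

import logging

logging.basicConfig(level=logging.DEBUG)

# logging.FATAL and logging.CRITICAL are the same level (50), and
# logging.fatal() only writes a log record -- it never exits on its own,
# which is why the wrapper above calls cleanUpAndExit() explicitly.
print(logging.FATAL == logging.CRITICAL)   # True
logging.fatal('logged at CRITICAL level')
print('still running; exiting is the caller\'s responsibility')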
Example #2
  def submit_job_description(self, job):
    """Creates and excutes a job request."""
    request = dataflow.DataflowProjectsLocationsJobsCreateRequest()
    request.projectId = self.google_cloud_options.project
    request.location = self.google_cloud_options.region
    request.job = job.proto

    try:
      response = self._client.projects_locations_jobs.Create(request)
    except exceptions.BadStatusCodeError as e:
      logging.error('HTTP status %d trying to create job'
                    ' at dataflow service endpoint %s',
                    e.response.status,
                    self.google_cloud_options.dataflow_endpoint)
      logging.fatal('details of server error: %s', e)
      raise
    logging.info('Create job: %s', response)
    # The response is a Job proto with the id for the new job.
    logging.info('Created job with id: [%s]', response.id)
    logging.info(
        'To access the Dataflow monitoring console, please navigate to '
        'https://console.developers.google.com/project/%s/dataflow/job/%s',
        self.google_cloud_options.project, response.id)

    return response
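
Example #2 passes the message and its arguments to logging.error()/logging.fatal() separately, so the %-interpolation is deferred until the record is actually emitted. A small sketch of the difference, using made-up values:

import logging

logging.basicConfig(level=logging.WARNING)

status, endpoint = 502, 'https://dataflow.googleapis.com'   # made-up values

# Deferred interpolation: the substitution happens inside logging, and only
# when the record will actually be handled.
logging.error('HTTP status %d trying to create job at %s', status, endpoint)

# Eager interpolation: the string is built even though DEBUG is disabled here.
logging.debug('HTTP status %d trying to create job at %s' % (status, endpoint))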
Example #3
    def __deleteEpisodesFromTraktCollection(self, traktShows, kodiShows, fromPercent, toPercent):
        if kodiUtilities.getSettingAsBool('clean_trakt_episodes') and not self.sync.IsCanceled():
            removeTraktShows = copy.deepcopy(traktShows)
            removeKodiShows = copy.deepcopy(kodiShows)

            traktShowsRemove = self.__compareEpisodes(removeTraktShows, removeKodiShows)
            utilities.sanitizeShows(traktShowsRemove)

            if len(traktShowsRemove['shows']) == 0:
                self.sync.UpdateProgress(toPercent, line1=kodiUtilities.getString(32077), line2=kodiUtilities.getString(32110))
                logger.debug('[Episodes Sync] Trakt.tv episode collection is clean, no episodes to remove.')
                return

            logger.debug("[Episodes Sync] %i show(s) will have episodes removed from Trakt.tv collection." % len(traktShowsRemove['shows']))
            for show in traktShowsRemove['shows']:
                logger.debug("[Episodes Sync] Episodes removed: %s" % self.__getShowAsString(show, short=True))

            self.sync.UpdateProgress(fromPercent, line1=kodiUtilities.getString(32077), line2=kodiUtilities.getString(32111) % self.__countEpisodes(traktShowsRemove))

            logger.debug("[traktRemoveEpisodes] Shows to remove %s" % traktShowsRemove)
            try:
                self.sync.traktapi.removeFromCollection(traktShowsRemove)
            except Exception as ex:
                message = utilities.createError(ex)
                logging.fatal(message)

            self.sync.UpdateProgress(toPercent, line2=kodiUtilities.getString(32112) % self.__countEpisodes(traktShowsRemove))
Example #4
def processCat(catrel, arch, osrel, rest_client):
    logger.info("processCat: -> %r %r %r" % (catrel, arch, osrel))

    cc = CompCatalog(catrel, arch, osrel, rest_client)
    pkg_by_pkgname = cc.getCatalog()
    logger.info("processCat: iterate on %r" % (catrel))

    # build reverse dependency list
    rev_deps_access = RevDeps(rest_client)
    rev_deps_by_pkg = {}
    for pkgname in pkg_by_pkgname:
        pkg = pkg_by_pkgname[pkgname]
        # RevDepsByPkg returns only md5 sums and pkgnames, so we need to map
        # them back to CatSubSet
        revdeps = rev_deps_access.RevDepsByPkg(catrel, arch, osrel, pkgname)
        revdep_pkgs = []
        for _, pkgname in revdeps:
            revdep_pkg = pkg_by_pkgname[pkgname]
            revdep_pkgs.append(revdep_pkg)
        try:
            rev_deps_by_pkg[pkg] = revdep_pkgs
        except TypeError:
            logging.fatal("pkg: %r", pkg)
            raise

    logger.info("processCat: <- %r %r %r" % (catrel, arch, osrel))
    return pkg_by_pkgname, rev_deps_by_pkg
Example #5
def main():
  OPTIONS.parse_configure_file()
  logging.getLogger().setLevel(logging.INFO)

  if len(sys.argv) != 3:
    logging.fatal('Usage: %s android-lib.so arc-lib.so' % sys.argv[0])
    return 1

  android_lib = sys.argv[1]
  arc_lib = sys.argv[2]

  android_syms = get_defined_symbols(android_lib)
  arc_syms = get_defined_symbols(arc_lib)

  missing_syms = set(android_syms - arc_syms)

  # Ignore symbols starting with two underscores since they are internal ones.
  # However, we do not ignore symbols starting with one underscore so that the
  # script can check symbols such as _Zxxx (mangled C++ symbols), _setjmp, and
  # _longjmp.
  important_missing_syms = [
      sym for sym in missing_syms if not sym.startswith('__')]

  if important_missing_syms:
    for sym in sorted(important_missing_syms):
      logging.error('Missing symbol: %s' % sym)
    return 1
  return 0
Example #6
  def _VerifyStateDatabase(self):
    """Check that the state table exists and create an entry for our input file.

    If an entry for the input file already exists, verify its size and
    checksum.
    """
    rows = self._db.ExecuteOrDie(
        'SELECT Checksum, Size, Offset FROM %s.%s WHERE Filename = "%s"' %
        (self._state_database, self._state_table, self._filename))

    if not rows:
      logging.info('Creating row in state table')
      with self._db.Transaction():
        self._db.ExecuteOrDie(
            'INSERT INTO %s.%s VALUES ("%s", %d, "%s", 0, NULL)' %
            (self._state_database, self._state_table, self._filename,
             self._size, self._checksum))
    else:
      if self._size != long(rows[0]['Size']):
        logging.fatal('database filesize does not match actual file: %s vs %s',
                      self._size, rows[0]['Size'])
      if self._checksum != rows[0]['Checksum']:
        logging.fatal('SHA-1 checksum mismatch on file vs database')
      self._offset_bytes = rows[0]['Offset']
      logging.info('Resuming at offset %d', self._offset_bytes)
Example #7
	def find_or_create_pkg_info(self, link):
		#link   # http://pypi.python.org/pypi/getvps/0.1
		# check if link matches the format we are expecting:
		# http://pypi.python.org/pypi/<package>/<version>
		match = re.search(r'http://pypi.python.org/pypi/([^/]+)/([^/]+)$', link)
		if not match:
			logging.error("url {} does not match the expected format".format(link))
			return False
	
		package = match.group(1)
		version = match.group(2)
	
		data = self.packages.find_one({'package' : package, 'version' : version })
		if not data:
			data = {
				'package' : package,
				'version' : version,
				'status'  : 'waiting_for_zip_url'
			}
			logging.info("adding package {} version {} to the database ".format(package, version))
			self.packages.insert(data)
			data = self.packages.find_one({'package' : package, 'version' : version })
	
		if not data:
			logging.fatal("data just added and cannot be found? package {} version {}".format(package, version))
			return False

		self.data = data
		return True
Example #8
def main():
    #parsing arguments
    usage = "Usage: %prog [-l] [config_path [backup_name]]\n" \
            "If [config_path] is not specified ~/.pgbackup will be used\n" \
            "If [backup_name] is not specified scheduled backups will be executed"

    parser = optparse.OptionParser(usage)

    parser.add_option("-l", "--list", action="store_true", dest="list",
                      help="Lists servers/backups")

    options, args = parser.parse_args()

    if len(args) > 2:
        parser.error('Too many arguments')
    if len(args) > 0:
        config_path = args[0]
    else:
        config_path = "~/.pgbackup"

    try:
        scheduler = Scheduler(config_path)
        if len(args) == 2:
            backup_name = args[1]
            if options.list is True:
                pass #TODO: show backup info
            else:
                scheduler.start(backup_name)
        else:
            if options.list is True:
                pass #TODO: list servers/backups
            else:
                scheduler.start_current()
    except Exception, e:
        logging.fatal(e)
Example #9
	def ask_NCBI(self, url=None):
		try:
			response = urllib.urlopen(url if url is not None else self.args["url"])
			return response.read()
		except Exception, e:
			logging.fatal(e)
			sys.exit(2)
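
This snippet (like several others on this page) uses Python 2 syntax: `except Exception, e` and the old `urllib.urlopen`. A rough Python 3 rendering of the same pattern, with a hypothetical method name, might look like:

import logging
import sys
import urllib.request

def ask_ncbi(url):
    # Hypothetical Python 3 equivalent of the snippet above.
    try:
        with urllib.request.urlopen(url) as response:
            return response.read()
    except Exception as e:
        logging.fatal(e)   # log at CRITICAL level...
        sys.exit(2)        # ...then exit explicitly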
Example #10
def prepareForSave():  # pylint: disable=invalid-name
    """Called by client when the game is about to be saved, to let the Python AI know it should save any AI state
    information, such as plans or knowledge about the game from previous turns,
    in the state string so that they can be restored if the game is loaded."""
    empire = fo.getEmpire()
    if empire is None:
        fatal("This client has no empire. Doing nothing to prepare for save.")
        return

    if empire.eliminated:
        info("This empire has been eliminated. Save info request")
        return

    info("Preparing for game save by serializing state")

    # serialize (convert to string) global state dictionary and send to AI client to be stored in save file
    import savegame_codec
    try:
        dump_string = savegame_codec.build_savegame_string()
        fo.setSaveStateString(dump_string)
    except Exception as e:
        error("Failed to encode the AIstate as save-state string. "
              "The resulting save file should be playable but the AI "
              "may have a different aggression. The error raised was: %s"
              % e, exc_info=True)
Example #11
def init_save_key(key_size, key_dir, keyname="key-letsencrypt.pem"):
    """Initializes and saves a privkey.

    Inits key and saves it in PEM format on the filesystem.

    .. note:: keyname is the attempted filename, it may be different if a file
        already exists at the path.

    :param int key_size: RSA key size in bits
    :param str key_dir: Key save directory.
    :param str keyname: Filename of key

    :returns: Key
    :rtype: :class:`letsencrypt.le_util.Key`

    :raises ValueError: If unable to generate the key given key_size.

    """
    try:
        key_pem = make_key(key_size)
    except ValueError as err:
        logging.fatal(str(err))
        raise err

    # Save file
    le_util.make_or_verify_dir(key_dir, 0o700, os.geteuid())
    key_f, key_path = le_util.unique_file(
        os.path.join(key_dir, keyname), 0o600)
    key_f.write(key_pem)
    key_f.close()

    logging.info("Generating key (%d bits): %s", key_size, key_path)

    return le_util.Key(key_path, key_pem)
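
The pattern here is to log at the fatal level and then re-raise, leaving the decision of how to abort to the caller. A stripped-down sketch of the same pattern with a hypothetical key generator (this make_key and its ValueError are stand-ins for illustration, not the letsencrypt API):

import logging

def make_key(key_size):
    # Hypothetical stand-in for the real key generator.
    if key_size < 2048:
        raise ValueError('key size too small: %d' % key_size)
    return b'-----BEGIN RSA PRIVATE KEY-----\n...'

try:
    key_pem = make_key(1024)
except ValueError as err:
    logging.fatal(str(err))   # record the failure at CRITICAL level
    raise                     # but let the caller decide whether to exit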
Example #12
	def __deleteMoviesFromTraktCollection(self, traktMovies, kodiMovies):

		if utilities.getSettingAsBool('clean_trakt_movies') and not self.__isCanceled():
			removeTraktMovies = copy.deepcopy(traktMovies)
			removeKodiMovies = copy.deepcopy(kodiMovies)

			logger.debug("[Movies Sync] Starting to remove.")
			traktMoviesToRemove = self.__compareMovies(removeTraktMovies, removeKodiMovies)
			self.sanitizeMovies(traktMoviesToRemove)
			logger.debug("[Movies Sync] Compared movies, found %s to remove." % len(traktMoviesToRemove))

			if len(traktMoviesToRemove) == 0:
				self.__updateProgress(60, line2=utilities.getString(32091))
				logger.debug("[Movies Sync] Trakt.tv movie collection is clean, no movies to remove.")
				return

			titles = ", ".join(["%s" % (m['title']) for m in traktMoviesToRemove])
			logger.debug("[Movies Sync] %i movie(s) will be removed from Trakt.tv collection." % len(traktMoviesToRemove))
			logger.debug("[Movies Sync] Movies removed: %s" % titles)

			self.__updateProgress(49, line2=utilities.getString(32076) % len(traktMoviesToRemove))

			moviesToRemove = {'movies': traktMoviesToRemove}
			try:
				self.traktapi.removeFromCollection(moviesToRemove)
			except Exception as ex:
				message = utilities.createError(ex)
				logging.fatal(message)

			self.__updateProgress(60, line2=utilities.getString(32092) % len(traktMoviesToRemove))
Example #13
  def get(self):
    auth = authorizer.Authorizer(self)
    if not auth.CanAdministerInstitutionFromUrl():
      auth.Redirect()
      return

    institution = self.request.get("institution")
    if not institution:
      logging.fatal("no institution")
    session = self.request.get("session")
    if not session:
      logging.fatal("no session")

    logout_url = auth.GetLogoutUrl(self)
    message = self.request.get('message')
    session_query = urllib.urlencode({'institution': institution,
                                      'session': session})

    num_students = len(yaml.load(models.Students.fetch(institution, session)))
    template_values = {
      'logout_url': logout_url,
      'user_email' : auth.email,
      'institution' : institution,
      'session' : session,
      'message': message,
      'session_query': session_query,
      'self': self.request.uri,
      'num_students': num_students,
    }
    template = JINJA_ENVIRONMENT.get_template('scheduler.html')
    self.response.write(template.render(template_values))
Example #14
  def get(self):
    auth = authorizer.Authorizer(self)
    if not auth.CanAdministerInstitutionFromUrl():
      auth.Redirect()
      return

    institution = self.request.get("institution")
    if not institution:
      logging.fatal("no institution")
    session = self.request.get("session")
    if not session:
      logging.fatal("no session")

    message = self.request.get('message')
    session_query = urllib.urlencode({'institution': institution,
                                      'session': session})

    setup_status = error_check_logic.Checker.getStatus(institution, session)
    students = models.Students.FetchJson(institution, session)
    template_values = {
      'user_email' : auth.email,
      'institution' : institution,
      'session' : session,
      'message': message,
      'setup_status': setup_status,
      'session_query': session_query,
      'students': students,
    }
    template = JINJA_ENVIRONMENT.get_template('impersonation.html')
    self.response.write(template.render(template_values))
Example #15
	def __addMoviesToTraktCollection(self, kodiMovies, traktMovies):
		if utilities.getSettingAsBool('add_movies_to_trakt') and not self.__isCanceled():
			addTraktMovies = copy.deepcopy(traktMovies)
			addKodiMovies = copy.deepcopy(kodiMovies)

			traktMoviesToAdd = self.__compareMovies(addKodiMovies, addTraktMovies)
			self.sanitizeMovies(traktMoviesToAdd)
			logger.debug("[Movies Sync] Compared movies, found %s to add." % len(traktMoviesToAdd))

			if len(traktMoviesToAdd) == 0:
				self.__updateProgress(48, line2=utilities.getString(32084))
				logger.debug("[Movies Sync] Trakt.tv movie collection is up to date.")
				return

			titles = ", ".join(["%s" % (m['title']) for m in traktMoviesToAdd])
			logger.debug("[Movies Sync] %i movie(s) will be added to Trakt.tv collection." % len(traktMoviesToAdd))
			logger.debug("[Movies Sync] Movies to add : %s" % titles)

			self.__updateProgress(37, line2=utilities.getString(32063) % len(traktMoviesToAdd))

			moviesToAdd = {'movies': traktMoviesToAdd}
			#logger.debug("Movies to add: %s" % moviesToAdd)
			try:
				self.traktapi.addToCollection(moviesToAdd)
			except Exception as ex:
				message = utilities.createError(ex)
				logging.fatal(message)

			self.__updateProgress(48, line2=utilities.getString(32085) % len(traktMoviesToAdd))
Example #16
    def load_markers(self, filename, attachments, max_frames=1e100):
        '''Load marker data and attachment preferences into the model.

        Parameters
        ----------
        filename : str
            The name of a file containing marker data. This currently needs to
            be either a .C3D or a .CSV file. CSV files must adhere to a fairly
            strict column naming convention; see :func:`Markers.load_csv` for
            more information.
        attachments : str
            The name of a text file specifying how markers are attached to
            skeleton bodies.
        max_frames : number, optional
            Only read in this many frames of marker data. By default, the entire
            data file is read into memory.

        Returns
        -------
        markers : :class:`Markers`
            Returns a markers object containing loaded marker data as well as
            skeleton attachment configuration.
        '''
        self.markers = Markers(self)
        fn = filename.lower()
        if fn.endswith('.c3d'):
            self.markers.load_c3d(filename, max_frames=max_frames)
        elif fn.endswith('.csv') or fn.endswith('.csv.gz'):
            self.markers.load_csv(filename, max_frames=max_frames)
        else:
            logging.fatal('%s: not sure how to load markers!', filename)
        self.markers.load_attachments(attachments, self.skeleton)
Example #17
    def discover_collection(self):
        collections = self.db.collection_names(include_system_collections=False)
        if len(collections) == 0:
            self.log.warning("No dataset in %s database." % self.get_db_name())
            self.initialized = False
            return

        if len(collections) > 1:
            logging.warning("Multiple collections found:")
            for collection_name in collections:
                logging.warning("\t%s" % collection_name)

        collection_name = collections[0]
        logging.info("Using collection: %s" % collection_name)
        self.collection = self.db[collection_name]

        # For the input database, we also want to create some indices to speed up
        # queries.
        num_docs_in_collection = self.collection.count()
        if num_docs_in_collection == 0:
            logging.fatal("Collection %s.%s is empty" %
                          (self.get_db_name(), collection_name))
            raise RuntimeError("Empty collection.")
        else:
            logging.info("Collection %s.%s has %d documents" %
                         (self.get_db_name(), collection_name, num_docs_in_collection))

        return self.collection
Example #18
def block_processor(change_q, blockfetcher, config_dir, blockdir, depth):
    last_processed_block = get_last_processed_block(config_dir)
    block_q = asyncio.Queue()
    while True:
        add_remove, block_hash, block_index = yield from change_q.get()
        if add_remove == "remove":
            the_other = block_q.pop()
            if the_other[1:] != (block_hash, block_index):
                logging.fatal("problem merging! did the block chain fork? %s %s", the_other, block_hash)
                import sys
                sys.exit(-1)
            continue
        if add_remove != "add":
            logging.error("something weird from change_q")
            continue
        if block_index < last_processed_block:
            continue
        item = (blockfetcher.get_block_future(block_hash, block_index), block_hash, block_index)
        block_q.put_nowait(item)
        if change_q.qsize() > 0:
            continue
        while block_q.qsize() > depth:
            # we have blocks that are buried and ready to write
            future, block_hash, block_index = yield from block_q.get()
            block = yield from asyncio.wait_for(future, timeout=None)
            write_block_to_disk(blockdir, block, block_index)
            update_last_processed_block(config_dir, block_index)
Example #19
def start_tracker():
    """Start the Torrent Tracker.
    """
    # parse commandline options
    parser = OptionParser()
    parser.add_option("-p", "--port", help="Tracker Port", default=0)
    parser.add_option("-b", "--background", action="store_true", default=False, help="Start in background")
    parser.add_option("-d", "--debug", action="store_true", default=False, help="Debug mode")
    (options, args) = parser.parse_args()

    # setup directories
    create_pytt_dirs()
    # setup logging
    setup_logging(options.debug)

    try:
        # start the torrent tracker
        run_app(int(options.port) or get_config().getint("tracker", "port"))
    except KeyboardInterrupt:
        logging.info("Tracker Stopped.")
        close_db()
        sys.exit(0)
    except Exception, ex:
        logging.fatal("%s" % str(ex))
        close_db()
        sys.exit(-1)
Example #20
    def post(self):
        auth = authorizer.Authorizer(self)
        if not auth.HasStudentAccess():
            auth.Redirect()
            return

        institution = self.request.get("institution")
        if not institution:
            logging.fatal("no institution")
        session = self.request.get("session")
        if not session:
            logging.fatal("no session")
        email = auth.student_email
        want = self.request.get("want").split(",")
        if want[0] == "":
            want.pop(0)
        dontcare = self.request.get("dontcare").split(",")
        if dontcare[0] == "":
            dontcare.pop(0)
        dontwant = self.request.get("dontwant").split(",")
        if dontwant[0] == "":
            dontwant.pop(0)
        models.Preferences.Store(email, institution, session, want, dontcare, dontwant)
        if self.request.get("Save") == "Save":
            logging.info("Form Saved")
        else:
            logging.info("Auto Save")
        self.RedirectToSelf(institution, session, email, "Saved Preferences")
Example #21
def main(argv=None):
    # New user interactive session (with extra bells and whistles).
    user_session = session.InteractiveSession()
    user_session.session_list.append(user_session)

    # Allow all special plugins to run.
    user_session.privileged = True

    def global_arg_cb(global_flags, _):
        if global_flags.version:
            print("This is Rekall Version %s (%s)" % (
                constants.VERSION, constants.CODENAME))

            print(rekall.get_versions())
            sys.exit(0)

    with user_session.GetRenderer().start():
        plugin_cls, flags = args.parse_args(
            argv=argv, global_arg_cb=global_arg_cb,
            user_session=user_session)

    # Install any quotas the user requested.
    user_session = quotas.wrap_session(user_session)
    try:
        # Run the plugin with plugin specific args.
        user_session.RunPlugin(plugin_cls, **config.RemoveGlobalOptions(flags))
    except Exception as e:
        logging.fatal("%s. Try --debug for more information." % e)
        if getattr(flags, "debug", None):
            pdb.post_mortem(sys.exc_info()[2])
        raise
    finally:
        user_session.Flush()
Example #22
    def check():
        if not Programs.checked:
            try:
                rdseed_proc = subprocess.Popen([Programs.rdseed], 
                                                 stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                (out,err) = rdseed_proc.communicate()
            
            except OSError, e:
                if e.errno == 2:
                    reason =  "Could not find executable: '%s'." % Programs.rdseed
                else:
                    reason = str(e)
            
                logging.fatal('Failed to run rdseed program. %s' % reason)
                sys.exit(1)


            m = re.search(r'Release (\d+(\.\d+(\.\d+)?)?)', err)
            if not m:
                logger.warn('Cannot determine rdseed version number.')

            version = m.group(1)

            Programs.checked = True
            if cmp_version('4.7.5', version) == 1 or cmp_version(version, '5.0') == 1:
                logger.warn('Module pyrocko.rdseed has not been tested with version %s of rdseed.' % version)
Example #23
    def run(self):
        logging.info("Resolving oplogs using %i threads max" % self.thread_count)

        tailed_oplogs = []
        self.end_ts   = self.get_consistent_end_ts()
        for host in self.backup_oplogs:
            for port in self.backup_oplogs[host]:
                backup_oplog = self.backup_oplogs[host][port]
                if host in self.tailed_oplogs and port in self.tailed_oplogs[host]:
                    tailed_oplog = self.tailed_oplogs[host][port]
                    tailed_oplogs.append(tailed_oplog['file'])

                    if backup_oplog['last_ts'] is None and tailed_oplog['last_ts'] is None:
                        logging.info("No oplog changes to resolve for %s:%s" % (host, port))
                    elif backup_oplog['last_ts'] > tailed_oplog['last_ts']:
                        logging.fatal(
                            "Backup oplog is newer than the tailed oplog! This situation is unsupported. Please retry backup")
                        raise Exception, "Backup oplog is newer than the tailed oplog!", None
                    else:
                        try:
                            self._pool.apply_async(OplogResolve(
                                host,
                                port,
                                tailed_oplog['file'],
                                backup_oplog['file'],
                                backup_oplog['last_ts'],
                                self.end_ts,
                                self.dump_gzip
                            ).run)
                        except Exception, e:
                            logging.fatal("Resolve failed for %s:%s! Error: %s" % (host, port, e))
                            raise e
                else:
                    logging.info("No tailed oplog for host %s:%s" % (host, port))
Example #24
    def config_test(self):  # pylint: disable=no-self-use
        """Check the configuration of Nginx for errors.

        :returns: Success
        :rtype: bool

        """
        try:
            proc = subprocess.Popen(
                [self.config.nginx_ctl, "-t"],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE)
            stdout, stderr = proc.communicate()
        except (OSError, ValueError):
            logging.fatal("Unable to run nginx config test")
            sys.exit(1)

        if proc.returncode != 0:
            # Enter recovery routine...
            logging.error("Config test failed")
            logging.error(stdout)
            logging.error(stderr)
            return False

        return True
Example #25
def nginx_restart(nginx_ctl):
    """Restarts the Nginx Server.

    :param str nginx_ctl: Path to the Nginx binary.

    """
    try:
        proc = subprocess.Popen([nginx_ctl, "-s", "reload"],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        stdout, stderr = proc.communicate()

        if proc.returncode != 0:
            # Enter recovery routine...
            logging.error("Nginx Restart Failed!")
            logging.error(stdout)
            logging.error(stderr)
            return False

    except (OSError, ValueError):
        logging.fatal(
            "Nginx Restart Failed - Please Check the Configuration")
        sys.exit(1)

    return True
Example #26
 def run(self):
     try:
         self.proc = subprocess.Popen(self.cmd,
                                      shell = False, stdout = subprocess.PIPE,
                                      stderr = subprocess.PIPE)
     except OSError:
        logging.fatal('cannot execute command')
Example #27
    def __init__(self, path):
        self.masks = []
        self.mask_groups = []

        logging.debug("Loading json %s", path)
        try:
            task = json.load(open(path, "r"))
        except ValueError:
            logging.fatal("Error parsing task: Invalid Syntax of JSON file")
            sys.exit(1)

        if 'name' not in task:
            raise SyntaxError("Error parsing task: no 'name' specified")
        self.name = task['name'].strip()
        if 'apis' not in task:
            raise SyntaxError("Error parsing task: no 'apis' specfied")
        for api in task['apis']:
            self.check_api(api)
            for fto in api['features_to_observe']:
                self.check_fto(api, fto)
                fto["mask_group"] = fto["mask_group"].strip()
                self.mask_groups.append(fto["mask_group"])
                for mask_name in [x().name for x in extractor.masks.get_all_masks([fto["mask_group"]])]:
                    fto['default_attr_ranges'] = map_value(lambda x: replace_date(x), fto['default_attr_ranges'])
                    self.masks.append(dict(fto.items() + {"mask_name": mask_name}.items()))
Example #28
def process_request_args(intput_config_file, command_line_json):
    """
    Load request arguments from a file, blend with JSON from command line.

    """
    logging.info("Loading file '%s' ..." % intput_config_file)
    try:
        request_args = json.load(open(intput_config_file, 'r'))
    except IOError as ex:
        logging.fatal("Reading request arguments file '%s' failed, "
                      "reason: %s." % (intput_config_file, ex))
        sys.exit(1)
    if command_line_json:
        logging.info("Parsing request arguments on the command line ...")
        cli_json = json.loads(command_line_json)
        # if a key exists in cli_json, update values in the main request_args dict
        for k in request_args.keys():
            if k in cli_json:
                request_args[k].update(cli_json[k])
    else:
        logging.warn("No request arguments to override (--json)? Some values will be wrong.")

    # iterate over all items recursively and warn about those ending with
    # OVERRIDE-ME, hence not overridden
    def check(items):
        for k, v in items:
            if isinstance(v, dict):
                check(v.items())
            if isinstance(v, unicode) and v.endswith("OVERRIDE-ME"):
                logging.warn("Not properly set: %s: %s" % (k, v))

    check(request_args.items())
    return request_args
Example #29
def validate_args(options, args):
    'Check and fix some of the parameters specified on the command line'
    
    # We want to convert a string of Stokes components like "I,q"
    # into something recognized by healpy.read_map, i.e. (0, 1).
    # The use of "frozenset" removes duplicates.
    component_map = {'I': 0, 'Q': 1, 'U': 2}
    try:
        stokes_component = component_map[options.stokes_component.upper()]
    except KeyError:
        log.fatal('Unknown Stokes component %s in string "%s" '
                  '(available choices are %s)',
                  sys.exc_value,
                  options.stokes_components,
                  ', '.join(['"%s"' % k for k in component_map.keys()]))
        sys.exit(1)

    # Now overwrite options.stokes_components: we do not need the
    # user-provided string any longer
    options.stokes_component = stokes_component

    if len(args) < 2:
        sys.stderr.write('Error: at least one map (and its title) are '
                         'expected on the command line\n')
        sys.exit(1)
Example #30
 def connect(self):
     try:
         logging.debug("Getting MongoDB connection to %s:%s" % (self.host, self.port))
         conn = MongoClient(host=self.host, port=int(self.port), connectTimeoutMS=int(self.conn_timeout))
     except Exception, e:
         logging.fatal("Unable to connect to %s:%s! Error: %s" % (self.host, self.port, e))
         raise e
Example #31

def main(options, args):
    program_number = 0
    for line in file(args[0]):
        tokens = line.strip().split('\t')
        if not tokens or not tokens[0] or tokens[0].startswith('#'):
            continue
        kind = tokens[0]

        if kind != 'patch':
            continue

        name = tokens[1]
        data = ParseHexDump('\t'.join(tokens[2:]), name)

        patch = BuildAmbikaPatch(ShruthiPatch(data))
        SaveAmbikaProgram(
            file('controller/data/programs2/%03d.PRO' % program_number, 'wb'),
            patch, DEFAULT_AMBIKA_PART_DATA, name)
        program_number += 1


if __name__ == '__main__':
    parser = optparse.OptionParser()
    options, args = parser.parse_args()
    if len(args) != 1:
        logging.fatal('Specify a source file')
    else:
        main(options, args)
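
Note that this script never calls logging.basicConfig(). Under Python 3.2+ the fatal message would still appear, because the module's last-resort handler writes WARNING-and-above records to stderr; under Python 2 (which this script targets, given the file() builtin) the record is dropped with a one-time "No handlers could be found" warning instead. A small standalone sketch of the Python 3 behaviour:

import logging

# No basicConfig() on purpose: on Python 3.2+ the module-level last-resort
# handler still writes WARNING-and-above records (including fatal) to stderr.
logging.fatal('Specify a source file')
print(logging.lastResort)   # roughly: <_StderrHandler (WARNING)>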
Example #32
        config = vgg_ssd_config
    elif args.net == 'mb1-ssd':
        create_net = create_mobilenetv1_ssd
        config = mobilenetv1_ssd_config
    elif args.net == 'mb1-ssd-lite':
        create_net = create_mobilenetv1_ssd_lite
        config = mobilenetv1_ssd_config
    elif args.net == 'sq-ssd-lite':
        create_net = create_squeezenet_ssd_lite
        config = squeezenet_ssd_config
    elif args.net == 'mb2-ssd-lite':
        create_net = lambda num: create_mobilenetv2_ssd_lite(
            num, width_mult=args.mb2_width_mult)
        config = mobilenetv1_ssd_config
    else:
        logging.fatal("The net type is wrong.")
        parser.print_help(sys.stderr)
        sys.exit(1)
    train_transform = TrainAugmentation(config.image_size, config.image_mean,
                                        config.image_std)
    target_transform = MatchPrior(config.priors, config.center_variance,
                                  config.size_variance, 0.5)

    test_transform = TestTransform(config.image_size, config.image_mean,
                                   config.image_std)

    logging.info("Prepare training datasets.")
    datasets = []
    for dataset_path in args.datasets:
        if args.dataset_type == 'voc':
            dataset = VOCDataset(dataset_path,
Example #33
		pred_dec = network.mvod_bottleneck_lstm2.SSD(num_classes=num_classes, alpha = args.width_mult, is_test=True, config= config, batch_size=1)
		net = network.mvod_bottleneck_lstm2.MobileVOD(pred_enc, pred_dec)
	elif args.net == 'lstm3':
		pred_enc = network.mvod_bottleneck_lstm3.MobileNetV1(num_classes=num_classes, alpha = args.width_mult)
		pred_dec = network.mvod_bottleneck_lstm3.SSD(num_classes=num_classes, alpha = args.width_mult, is_test=True, config= config, batch_size=1)
		net = network.mvod_bottleneck_lstm3.MobileVOD(pred_enc, pred_dec)
	elif args.net == 'lstm4':
		pred_enc = network.mvod_lstm4.MobileNetV1(num_classes=num_classes, alpha = args.width_mult)
		pred_dec = network.mvod_lstm4.SSD(num_classes=num_classes, alpha = args.width_mult, is_test=True, config= config, batch_size=1)
		net = network.mvod_lstm4.MobileVOD(pred_enc, pred_dec)
	elif args.net == 'lstm5':
		pred_enc = network.mvod_lstm5.MobileNetV1(num_classes=num_classes, alpha = args.width_mult)
		pred_dec = network.mvod_lstm5.SSD(num_classes=num_classes, alpha = args.width_mult, is_test=True, config= config, batch_size=1)
		net = network.mvod_lstm5.MobileVOD(pred_enc, pred_dec)
	else:
		logging.fatal("The net type is wrong. It should be one of basenet, lstm{1,2,3,4,5}.")
		parser.print_help(sys.stderr)
		sys.exit(1)  

	timer.start("Load Model")
	net.load_state_dict(
			torch.load(args.trained_model,
					   map_location=lambda storage, loc: storage))
	net = net.to(device)
	print(f'It took {timer.end("Load Model")} seconds to load the model.')
	predictor = Predictor(net, config.image_size, config.image_mean,
                          config.image_std,
                          nms_method=args.nms_method,
                          iou_threshold=config.iou_threshold,
                          candidate_size=200,
                          sigma=0.5,
Example #34
def setup_activemq(prepend_str):
    logging.info('%s [broker] Resolving brokers', prepend_str)

    brokers_alias = []
    brokers_resolved = []
    try:
        brokers_alias = [broker.strip() for broker in config_get('messaging-hermes', 'brokers').split(',')]
    except:
        raise Exception('Could not load brokers from configuration')

    logging.info('%s [broker] Resolving broker dns alias: %s', prepend_str, brokers_alias)
    brokers_resolved = []
    for broker in brokers_alias:
        try:
            addrinfos = socket.getaddrinfo(broker, 0, socket.AF_INET, 0, socket.IPPROTO_TCP)
            brokers_resolved.extend(ai[4][0] for ai in addrinfos)
        except socket.gaierror as ex:
            logging.error('%s [broker] Cannot resolve domain name %s (%s)', prepend_str, broker, str(ex))

    logging.debug('%s [broker] Brokers resolved to %s', prepend_str, brokers_resolved)

    if not brokers_resolved:
        logging.fatal('%s [broker] No brokers resolved.', prepend_str)
        return None, None, None, None, None

    broker_timeout = 3
    if not broker_timeout:  # Allow zero in config
        broker_timeout = None

    logging.info('%s [broker] Checking authentication method', prepend_str)
    use_ssl = True
    try:
        use_ssl = config_get_bool('messaging-hermes', 'use_ssl')
    except:
        logging.info('%s [broker] Could not find use_ssl in configuration -- please update your rucio.cfg', prepend_str)

    port = config_get_int('messaging-hermes', 'port')
    vhost = config_get('messaging-hermes', 'broker_virtual_host', raise_exception=False)
    if not use_ssl:
        username = config_get('messaging-hermes', 'username')
        password = config_get('messaging-hermes', 'password')
        port = config_get_int('messaging-hermes', 'nonssl_port')

    conns = []
    for broker in brokers_resolved:
        if not use_ssl:
            logging.info('%s [broker] setting up username/password authentication: %s', prepend_str, broker)
            con = stomp.Connection12(host_and_ports=[(broker, port)],
                                     vhost=vhost,
                                     keepalive=True,
                                     timeout=broker_timeout)
        else:
            logging.info('%s [broker] setting up ssl cert/key authentication: %s', prepend_str, broker)
            con = stomp.Connection12(host_and_ports=[(broker, port)],
                                     use_ssl=True,
                                     ssl_key_file=config_get('messaging-hermes', 'ssl_key_file'),
                                     ssl_cert_file=config_get('messaging-hermes', 'ssl_cert_file'),
                                     vhost=vhost,
                                     keepalive=True,
                                     timeout=broker_timeout)

        con.set_listener('rucio-hermes',
                         HermesListener(con.transport._Transport__host_and_ports[0]))

        conns.append(con)
    destination = config_get('messaging-hermes', 'destination')
    return conns, destination, username, password, use_ssl
Example #35
validate_config()


def shellquote(s):
    # https://stackoverflow.com/questions/35817/how-to-escape-os-system-calls
    return "'" + s.replace("'", "'\\''") + "'"


# setup connections to AWS
if args.run_kinesis_lambda:
    kinesis_stream_arn = getattr(settings, args.kinesis_stream_arn)
    logging.info('Kinesis stream ARN: %s', kinesis_stream_arn)
    logging.info('Kinesis endpoint: %s', settings.ENDPOINTS.get(AWS.KINESIS))
    if get_arn_from_arn_string(kinesis_stream_arn).service != AWS.KINESIS:
        logging.fatal("%s is not a Kinesis ARN", kinesis_stream_arn)
        sys.exit(1)
    kinesis_conn = get_connection(kinesis_stream_arn, disable_chaos=True)
    kinesis_stream = get_arn_from_arn_string(
        kinesis_stream_arn).slash_resource()
    logging.info('Kinesis stream: %s', kinesis_stream)

if args.run_sqs_lambda:
    sqs_queue_arn = getattr(settings, args.sqs_queue_arn)
    logging.info('SQS queue ARN: %s', sqs_queue_arn)
    logging.info('SQS endpoint: %s', settings.ENDPOINTS.get(AWS.SQS))
    if get_arn_from_arn_string(sqs_queue_arn).service != AWS.SQS:
        logging.fatal("%s is not a SQS ARN", sqs_queue_arn)
        sys.exit(1)
    sqs_conn = get_connection(sqs_queue_arn, disable_chaos=True)
    sqs_queue = get_arn_from_arn_string(sqs_queue_arn).colon_resource()
Example #36
def get_symbol_size(elf, name):
    symbol_table = elf.get_section_by_name('.symtab')
    symbol = symbol_table.get_symbol_by_name(name)
    if not symbol:
        logging.fatal("No symbol {0}".format(name))
    return symbol['st_size']
Example #37
    if args.net == 'vgg16-ssd':
        net = create_vgg_ssd(len(class_names), is_test=True)
    elif args.net == 'mb1-ssd':
        net = create_mobilenetv1_ssd(len(class_names), is_test=True)
    elif args.net == 'mb1-ssd-lite':
        net = create_mobilenetv1_ssd_lite(len(class_names), is_test=True)
    elif args.net == 'sq-ssd-lite':
        net = create_squeezenet_ssd_lite(len(class_names), is_test=True)
    elif args.net == 'mb2-ssd-lite':
        net = create_mobilenetv2_ssd_lite(len(class_names),
                                          width_mult=args.mb2_width_mult,
                                          is_test=True)
    else:
        logging.fatal(
            "The net type is wrong. It should be one of vgg16-ssd, mb1-ssd and mb1-ssd-lite."
        )
        parser.print_help(sys.stderr)
        sys.exit(1)

    timer.start("Load Model")
    net.load(args.trained_model)
    net = net.to(DEVICE)
    print(f'It took {timer.end("Load Model")} seconds to load the model.')
    if args.net == 'vgg16-ssd':
        predictor = create_vgg_ssd_predictor(net,
                                             nms_method=args.nms_method,
                                             device=DEVICE)
    elif args.net == 'mb1-ssd':
        predictor = create_mobilenetv1_ssd_predictor(
            net, nms_method=args.nms_method, device=DEVICE)
Example #38
def main() -> None:
    common.init("client")

    common.system_check()

    parser = argparse.ArgumentParser(
        description="PTD client",
        formatter_class=lambda prog: argparse.RawDescriptionHelpFormatter(
            prog, max_help_position=35),
    )

    optional = parser._action_groups.pop()
    required = parser.add_argument_group("required arguments")

    # fmt: off
    required.add_argument("-a",
                          "--addr",
                          metavar="ADDR",
                          type=str,
                          required=True,
                          help="server address")
    required.add_argument(
        "-w",
        "--run-workload",
        metavar="CMD",
        type=str,
        required=True,
        help="a shell command to run under power measurement")
    required.add_argument("-L",
                          "--loadgen-logs",
                          metavar="INDIR",
                          type=str,
                          required=True,
                          help="collect loadgen logs from INDIR")
    required.add_argument("-o",
                          "--output",
                          metavar="OUTDIR",
                          type=str,
                          required=True,
                          help="put logs into OUTDIR (copied from INDIR)")
    required.add_argument("-n",
                          "--ntp",
                          metavar="ADDR",
                          type=str,
                          required=True,
                          help="NTP server address")

    parser.add_argument("-T",
                        "--no-timestamp-path",
                        action="store_true",
                        help="don't add timestamp to the logs path")
    parser.add_argument("-t",
                        "--timestamp-path",
                        action="store_false",
                        dest="no_timestamp_path",
                        help="add timestamp to the logs path [default]")
    parser.add_argument("-p",
                        "--port",
                        metavar="PORT",
                        type=int,
                        default=4950,
                        help="server port, defaults to 4950")
    parser.add_argument(
        "-l",
        "--label",
        metavar="LABEL",
        type=str,
        default="",
        help="a label to include into the resulting directory name")
    parser.add_argument("-f",
                        "--force",
                        action="store_true",
                        help="force remove loadgen logs directory (INDIR)")
    parser.add_argument("-S",
                        "--stop-server",
                        action="store_true",
                        help="stop the server after processing this client")
    # fmt: on
    common.log_redirect.start()

    parser._action_groups.append(optional)
    args = parser.parse_args()

    if not common.check_label(args.label):
        parser.error(
            "invalid --label value: {args.label!r}. Should be alphanumeric or -_."
        )

    if args.port is None:
        args.port = common.DEFAULT_PORT
        logging.warning(f"Assuming default port (--port {common.DEFAULT_PORT}")

    check_paths(args.loadgen_logs, args.output, args.force)

    common.mkdir_if_ne(args.output)

    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.connect((args.addr, args.port))
    except OSError as e:
        s.close()
        logging.fatal(
            f"Could not connect to the server {args.addr}:{args.port} {e}")
        exit(1)

    serv = common.Proto(s)
    serv.enable_keepalive()

    summary = summarylib.Summary()

    command = CommandSender(serv, summary)

    # TODO: timeout and max msg size for recv
    magic = command(common.MAGIC_CLIENT)
    if magic != common.MAGIC_SERVER:
        logging.error(
            f"Handshake failed, expected {common.MAGIC_SERVER!r}, got {magic!r}"
        )
        exit(1)
    del magic

    if args.stop_server:
        # Enable the "stop" flag on the server so it will stop after the client
        # disconnects.  We are sending this early to make sure the server
        # eventually will stop even if the client crashes unexpectedly.
        command("stop", check=True)

    def sync_check() -> None:
        if not time_sync.sync(
                args.ntp,
                lambda: float(command("time")),
                lambda: command("set_ntp"),
        ):
            exit()

    sync_check()

    summary.client_uuid = uuid.uuid4()
    try:
        session = command(f"new,{args.label},{summary.client_uuid}")
        if session is None or not session.startswith("OK "):
            logging.fatal("Could not start new session")
            exit(1)
        session, server_uuid = session[len("OK "):].split(",")
    except Exception:
        exit(1)
    summary.server_uuid = uuid.UUID(server_uuid)
    summary.session_name = session
    logging.info(f"Session id is {session!r}")

    common.log_sources()
    if args.no_timestamp_path:
        out_dir = args.output
        power_dir = os.path.join(args.output, "power")
    else:
        out_dir = os.path.join(args.output, session)
        os.mkdir(out_dir)
        power_dir = os.path.join(args.output, session, "power")
    os.mkdir(power_dir)

    for mode in ["ranging", "testing"]:
        logging.info(f"Running workload in {mode} mode")
        out = os.path.join(out_dir, "run_1" if mode == "testing" else mode)

        sync_check()

        summary.phase(mode, 0)
        command(f"session,{session},start,{mode}", check=True)

        summary.phase(mode, 1)
        logging.info(f"Running the workload {args.run_workload!r}")
        time_load_start = time.time()
        subprocess.run(args.run_workload, shell=True, check=True)
        time_load_end = time.time()
        summary.phase(mode, 2)

        command(f"session,{session},stop,{mode}", check=True)
        summary.phase(mode, 3)

        loadgen_logs = find_loadgen_logs(args.loadgen_logs, time_load_start,
                                         time_load_end)

        if not loadgen_logs:
            logging.fatal(
                f"Expected {args.loadgen_logs!r} to be a directory containing loadgen logs, but it is not"
            )
            logging.fatal(
                "Please make sure that the provided workload command writes its "
                "output into the directory specified by the --loadgen-logs/-L argument"
            )
            exit(1)

        logging.info(f"Copying loadgen logs from {loadgen_logs!r} to {out!r}")
        os.mkdir(out)
        for file in [LOADGEN_LOG_FILE] + LOADGEN_OTHER_FILES:
            shutil.copy(os.path.join(loadgen_logs, file), out)

    logging.info("Done runs")

    client_log_path = os.path.join(power_dir, "client.log")
    common.log_redirect.stop(client_log_path)

    summary.hash_results(out_dir)

    client_json_path = os.path.join(power_dir, "client.json")
    summary.save(client_json_path)

    command(f"session,{session},done", check=True)

    for fname in common.FETCH_FILES_LIST:
        command.download(f"download,{session},{fname}",
                         os.path.join(out_dir, fname))

    command(f"cleanup,{session}", check=True)

    logging.info("Successful exit")
Example #39
                try:
                    stage_nprocessors = __settings.icell8['nprocessors_%s' %
                                                          stage]
                except KeyError:
                    pass
            nprocessors[stage] = stage_nprocessors

    # Check for clashing -u/-p
    if args.project and args.unaligned_dir:
        logger.fatal("Cannot specify -u and -p together")
        sys.exit(1)

    # Check for contaminant filtering inputs
    if args.mammalian_conf is None or \
       not os.path.isfile(args.mammalian_conf):
        logging.fatal("Mammalian genome panel not specified "
                      "or doesn't exist (-m)")
        sys.exit(1)
    if args.contaminants_conf is None or \
       not os.path.isfile(args.contaminants_conf):
        logging.fatal("Contaminant genome panel not specified "
                      "or doesn't exist (-c)")
        sys.exit(1)

    # Output dir
    if args.outdir is None:
        if args.project:
            outdir = args.project
        else:
            outdir = "icell8"
    else:
        outdir = args.outdir
Example #40
import logging
import sys

try:
    import hypchat
except ImportError:
    logging.exception("Could not start the HipChat backend")
    logging.fatal(
        "You need to install the hypchat package in order to use the HipChat "
        "back-end. You should be able to install this package using: "
        "pip install hypchat")
    sys.exit(1)

from errbot import holder
from errbot.backends.base import MUCOccupant, MUCRoom, RoomDoesNotExistError
from errbot.backends.xmpp import XMPPBackend, XMPPConnection
from errbot.utils import parse_jid


class HipChatMUCOccupant(MUCOccupant):
    """
    An occupant of a Multi-User Chatroom.

    This class has all the attributes that are returned by a call to
    https://www.hipchat.com/docs/apiv2/method/get_all_participants
    with the link to self expanded.
    """
    def __init__(self, user):
        """
        :param user:
            A user object as returned by
Example #41
    def get_tf_layer(self, genome: 'CnnGenome') -> keras.layers.Layer:
        """
        A description of how this method works to construct a complete TensorFlow computation graph can be found
        in the documentation for the CnnGenome::create_model.
        
        This should never return None. It takes all of the tensors from the DenseEdges and adds them together, then pushes
        them through an activation function, then adds a series of fully connected layers followed by the classification layer,
        and returns the result as a Tensor.
        """

        if self.tf_layer is not None:
            return self.tf_layer

        # To make it easy to inherit epigenetic weights, we separate the weights out into separate
        # layers without activation functions, and add the resulting layers together and then apply
        # an activation function.
        # So these intermediate layers shouldn't have an activation function
        maybe_intermediate_layers: List[Optional[tf.Tensor]] = list(
            map(lambda edge_in: genome.edge_map[edge_in].get_tf_layer(genome),
                self.inputs))

        # filter out nones
        intermediate_layers: List[tf.Tensor] = [
            x for x in maybe_intermediate_layers if x is not None
        ]

        if not intermediate_layers:
            logging.fatal(f"output layer has no inputs!")

        assert intermediate_layers

        layer: tf.Tensor = None

        # All but the last dense layer should have the hyper parameter specified activation type.
        # The last layer should be a classification activation type life softmax or svm
        for i, size in enumerate(self.dense_layers):

            # if layer is None we haven't used the intermediate layers yet, so we have to sum them if
            # there is more than one (if there's only one, Add() would throw an error)
            if layer is None:
                if len(intermediate_layers) > 1:
                    layer = keras.layers.Add()(intermediate_layers)
                else:
                    layer = intermediate_layers[0]
            else:
                shape = layer.shape[1:]
                layer = \
                    keras.layers.Dense(
                            size,
                            input_shape=shape,
                            activation='linear',
                            kernel_regularizer=get_regularizer(genome.hp),
                            bias_regularizer=get_regularizer(genome.hp),
                            name=self.get_name() + f"_{i}")(layer)

            if i == len(self.dense_layers) - 1:
                layer = make_classification_layer()(layer)
            else:
                layer = make_activation_layer()(layer)

        self.tf_layer = layer

        return self.tf_layer
Example #42
    def __call__(self, input_or_adv, label=None, unpack=True,
                 binary_search_steps=5, max_iterations=1000,
                 confidence=0, learning_rate=5e-3,
                 initial_const=1e-2, abort_early=True):

        """The L2 version of the Carlini & Wagner attack.

        Parameters
        ----------
        input_or_adv : `numpy.ndarray` or :class:`Adversarial`
            The original, unperturbed input as a `numpy.ndarray` or
            an :class:`Adversarial` instance.
        label : int
            The reference label of the original input. Must be passed
            if `a` is a `numpy.ndarray`, must not be passed if `a` is
            an :class:`Adversarial` instance.
        unpack : bool
            If true, returns the adversarial input, otherwise returns
            the Adversarial object.
        binary_search_steps : int
            The number of steps for the binary search used to
            find the optimal tradeoff-constant between distance and confidence.
        max_iterations : int
            The maximum number of iterations. Larger values are more
            accurate; setting it too small will require a large learning rate
            and will produce poor results.
        confidence : int or float
            Confidence of adversarial examples: a higher value produces
            adversarials that are further away, but more strongly classified
            as adversarial.
        learning_rate : float
            The learning rate for the attack algorithm. Smaller values
            produce better results but take longer to converge.
        initial_const : float
            The initial tradeoff-constant to use to tune the relative
            importance of distance and confidence. If `binary_search_steps`
            is large, the initial constant is not important.
        abort_early : bool
            If True, Adam will be aborted if the loss hasn't decreased
            for some time (a tenth of max_iterations).

        """

        a = input_or_adv
        del input_or_adv
        del label
        del unpack

        if not a.has_gradient():
            logging.fatal('Applied gradient-based attack to model that '
                          'does not provide gradients.')
            return

        min_, max_ = a.bounds()

        def to_attack_space(x):
            # map from [min_, max_] to [-1, +1]
            a = (min_ + max_) / 2
            b = (max_ - min_) / 2
            x = (x - a) / b

            # from [-1, +1] to approx. (-1, +1)
            x = x * 0.999999

            # from (-1, +1) to (-inf, +inf)
            return np.arctanh(x)

        def to_model_space(x):
            """Transforms an input from the attack space
            to the model space. This transformation and
            the returned gradient are elementwise."""

            # from (-inf, +inf) to (-1, +1)
            x = np.tanh(x)

            grad = 1 - np.square(x)

            # map from (-1, +1) to (min_, max_)
            a = (min_ + max_) / 2
            b = (max_ - min_) / 2
            x = x * b + a

            grad = grad * b
            return x, grad

        # variables representing inputs in attack space will be
        # prefixed with att_
        att_original = to_attack_space(a.unperturbed)

        # will be close but not identical to a.unperturbed
        reconstructed_original, _ = to_model_space(att_original)

        # the binary search finds the smallest const for which we
        # find an adversarial
        const = initial_const
        lower_bound = 0
        upper_bound = np.inf

        for binary_search_step in range(binary_search_steps):
            if binary_search_step == binary_search_steps - 1 and \
                    binary_search_steps >= 10:
                # in the last binary search step, use the upper_bound instead
                # TODO: find out why... it's not obvious why this is useful
                const = min(1e10, upper_bound)

            logging.info('starting optimization with const = {}'.format(const))

            att_perturbation = np.zeros_like(att_original)

            # create a new optimizer to minimize the perturbation
            optimizer = AdamOptimizer(att_perturbation.shape)

            found_adv = False  # found adv with the current const
            loss_at_previous_check = np.inf

            for iteration in range(max_iterations):
                x, dxdp = to_model_space(att_original + att_perturbation)
                logits, is_adv = a.forward_one(x)
                loss, dldx = self.loss_function(
                    const, a, x, logits, reconstructed_original,
                    confidence, min_, max_)

                logging.info('loss: {}; best overall distance: {}'.format(loss, a.distance))

                # backprop the gradient of the loss w.r.t. x further
                # to get the gradient of the loss w.r.t. att_perturbation
                assert dldx.shape == x.shape
                assert dxdp.shape == x.shape
                # we can do a simple elementwise multiplication, because
                # grad_x_wrt_p is a matrix of elementwise derivatives
                # (i.e. each x[i] w.r.t. p[i] only, for all i) and
                # grad_loss_wrt_x is a real gradient reshaped as a matrix
                gradient = dldx * dxdp

                att_perturbation += optimizer(gradient, learning_rate)

                if is_adv:
                    # this binary search step can be considered a success
                    # but optimization continues to minimize perturbation size
                    found_adv = True

                if abort_early and \
                        iteration % (np.ceil(max_iterations / 10)) == 0:
                    # after each tenth of the iterations, check progress
                    if not (loss <= .9999 * loss_at_previous_check):
                        break  # stop Adam if there has not been progress
                    loss_at_previous_check = loss

            if found_adv:
                logging.info('found adversarial with const = {}'.format(const))
                upper_bound = const
            else:
                logging.info('failed to find adversarial '
                             'with const = {}'.format(const))
                lower_bound = const

            if upper_bound == np.inf:
                # exponential search
                const *= 10
            else:
                # binary search
                const = (lower_bound + upper_bound) / 2
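
The heart of this attack is the change of variables: optimization happens in arctanh space, so every candidate maps back inside the input bounds without clipping, while an outer loop tunes the tradeoff constant by exponential and then binary search. A minimal standalone sketch of both ideas (numpy only; the bounds, the 4-element input and the found_adv stand-in are illustrative, not part of the original class):

import numpy as np

min_, max_ = 0.0, 1.0  # assumed input bounds

def to_attack_space(x):
    # map [min_, max_] -> (-inf, +inf) via a scaled arctanh
    a, b = (min_ + max_) / 2, (max_ - min_) / 2
    return np.arctanh(((x - a) / b) * 0.999999)

def to_model_space(w):
    # inverse mapping: tanh keeps the result strictly inside [min_, max_]
    a, b = (min_ + max_) / 2, (max_ - min_) / 2
    return np.tanh(w) * b + a

x = np.random.rand(4)
assert np.allclose(to_model_space(to_attack_space(x)), x, atol=1e-5)

# outer search over the tradeoff constant, mirroring the loop above
lower_bound, upper_bound, const = 0.0, np.inf, 1e-2
for _ in range(5):
    found_adv = const > 0.1  # stand-in for "an adversarial was found at this const"
    if found_adv:
        upper_bound = const   # success: try a smaller constant
    else:
        lower_bound = const   # failure: need a larger constant
    const = const * 10 if upper_bound == np.inf else (lower_bound + upper_bound) / 2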
Exemplo n.º 43
0
def main(args):
    FLAGS(args)
    if FLAGS.verbose:
        logging.basicConfig(level=logging.INFO)
    if FLAGS.debug:
        logging.basicConfig(level=logging.DEBUG)
    logging.debug(
        'binary: %s\noptimize: %d\nbase_directory: %s\n'
        'policy_file: %s\nrendered_acl_directory: %s', str(sys.argv[0]),
        int(FLAGS.optimize), str(FLAGS.base_directory), str(FLAGS.policy_file),
        str(FLAGS.output_directory))

    definitions = None
    try:
        definitions = naming.Naming(FLAGS.definitions_directory)
    except naming.NoDefinitionsError:
        logging.fatal('bad definitions directory: %s',
                      FLAGS.definitions_directory)

    # thread-safe list for storing files to write
    manager = multiprocessing.Manager()
    write_files = manager.list()

    with_errors = False
    if FLAGS.policy_file:
        # render just one file
        logging.info('rendering one file')
        RenderFile(FLAGS.policy_file, FLAGS.output_directory, definitions,
                   FLAGS.exp_info, write_files)
    else:
        # render all files in parallel
        logging.info('finding policies...')
        pols = []
        pols.extend(
            DescendRecursively(FLAGS.base_directory, FLAGS.output_directory,
                               definitions))

        pool = multiprocessing.Pool(processes=FLAGS.max_renderers)
        results = []
        for x in pols:
            results.append(
                pool.apply_async(RenderFile,
                                 args=(x.get('in_file'), x.get('out_dir'),
                                       definitions, FLAGS.exp_info,
                                       write_files)))
        pool.close()
        pool.join()

        for result in results:
            try:
                result.get()
            except (ACLParserError, ACLGeneratorError) as e:
                with_errors = True
                logging.warn(
                    '\n\nerror encountered in rendering process:\n%s\n\n', e)

    # actually write files to disk
    WriteFiles(write_files)

    if with_errors:
        logging.warn('done, with errors.')
        sys.exit(1)
    else:
        logging.info('done.')
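
The fan-out/fan-in shape of the parallel branch is worth isolating: work is submitted with apply_async, and worker exceptions only surface in the parent once result.get() is called on each AsyncResult. A minimal sketch of that pattern (plain Python; render and the file names are placeholders, not capirca's RenderFile):

import logging
import multiprocessing

def render(name):
    # placeholder worker; raises to show how errors travel back to the parent
    if name == 'bad.pol':
        raise ValueError('parse error in %s' % name)
    return name

if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    pool = multiprocessing.Pool(processes=2)
    results = [pool.apply_async(render, args=(n,)) for n in ('a.pol', 'bad.pol')]
    pool.close()
    pool.join()

    with_errors = False
    for result in results:
        try:
            logging.info('rendered %s', result.get())
        except ValueError as e:
            with_errors = True
            logging.warning('error encountered in rendering process: %s', e)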
Exemplo n.º 44
0
import logging

try:
    from flask import Flask
    from flask import request
except ImportError:
    logging.fatal(
        "Failed to import Flask. Flask must be installed to run tests.")

import json

server = Flask(__name__)


def test_failed():
    return json.dumps({
        'test': 'failed'
    })


def no_record_for_card(card, token=None):
    response = {
        'msga': 'No record found for ' + card,
        'msgb': 'TEST SUCCESS',
        'err': '1'
    }
    if token is not None:
        response['token'] = token
    return json.dumps(response)

Exemplo n.º 45
0
polling_interval: int = int(os.getenv('POLLING_INTERVAL', '20'))
docker_host: str = os.getenv('DOCKER_HOSTMACHINE', 'UNKNOWN')
slack_webhook_url: str = os.getenv('SLACK_WEBHOOK_URL', '')
email_sender: str = os.getenv('EMAIL_SENDER', '')
email_receiver: str = os.getenv('EMAIL_RECEIVER', '')
smtp_server: str = os.getenv('SMTP_SERVER', '')
restarted_containers: list = []
notification_content: dict = {}

# Test and establish connection to docker socket
try:
    CLIENT = docker.from_env()
    CLIENT.version()
    logging.info("Connection to Docker socket OK")
except Exception as err:
    logging.fatal("%s", err)
    sys.exit()


def send_slack_message(content) -> None:
    if slack_webhook_url != "":
        try:
            requests.post(slack_webhook_url,
                          data=json.dumps(content),
                          headers={'Content-Type': 'application/json'})
            logging.info("Message sent to Slack webhook: %s", content['text'])
        except (requests.exceptions.Timeout, ConnectionError) as err:
            logging.error("%s", err)


def send_smtp_message(content) -> None:
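
The excerpt stops at the send_smtp_message signature, so its body is not shown. A purely hypothetical sketch of how an SMTP sender could mirror send_slack_message, assuming the standard library smtplib and email.message plus the smtp_server / email_sender / email_receiver variables defined above (this is not the original implementation):

import logging
import smtplib
from email.message import EmailMessage

def send_smtp_message(content) -> None:
    # hypothetical sketch, not the original body
    if smtp_server and email_sender and email_receiver:
        msg = EmailMessage()
        msg['Subject'] = 'Container restart notification'
        msg['From'] = email_sender
        msg['To'] = email_receiver
        msg.set_content(str(content))
        try:
            with smtplib.SMTP(smtp_server) as smtp:
                smtp.send_message(msg)
            logging.info('Notification mail sent to %s', email_receiver)
        except (smtplib.SMTPException, OSError) as err:
            logging.error('%s', err)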
Exemplo n.º 46
0
        API_Data = Load_Logchain_API_Configuration()

        if Config_Data and API_Data:
            logging.info(f"{str(Date())} SIEM Chain Agent log backup initialising.")

            for Directory in Config_Data['source_log_directories']:
                Target_Directory = Config_Data['target_backup_directory']
                Backup_File = Make_File(Directory, Target_Directory)
                Temp_Directory = '/tmp/Logchain/agent'

                if os.path.exists(Temp_Directory):
                    shutil.rmtree(Temp_Directory)

                os.makedirs(Temp_Directory)
                copy_tree(Directory, Temp_Directory)

                for File in os.listdir(Temp_Directory):
                    Sync_Directory(Temp_Directory, Directory, File, API_Data, Backup_File)

                Compress_Directory(os.listdir(Temp_Directory), Temp_Directory, Backup_File)
                shutil.rmtree(Temp_Directory)

            logging.info(f"{str(Date())} SIEM Chain Agent log backup complete")

        else:
            sys.exit(f"{str(Date())} Loading configuration failed.")

    except Exception as e:
        logging.fatal(f"{str(Date())} {str(e)}")
        sys.exit()
Exemplo n.º 47
0
def main():
    logging_setup.DefineCommandLineArgs(parser)
    args = parser.parse_args()
    logging_setup.Init(args=args)

    if not args.name or not args.tarball:
        logging.fatal('--name and --tarball are required arguments.')
        sys.exit(1)

    retry_factory = retry.Factory()
    retry_factory = retry_factory.WithSourceTransportCallable(httplib2.Http)
    transport = transport_pool.Http(retry_factory.Build, size=8)

    if '@' in args.name:
        name = docker_name.Digest(args.name)
    else:
        name = docker_name.Tag(args.name)

    # OCI Image Manifest is compatible with Docker Image Manifest Version 2,
    # Schema 2. We indicate support for both formats by passing both media types
    # as 'Accept' headers.
    #
    # For reference:
    #   OCI: https://github.com/opencontainers/image-spec
    #   Docker: https://docs.docker.com/registry/spec/manifest-v2-2/
    accept = docker_http.SUPPORTED_MANIFEST_MIMES

    # Resolve the appropriate credential to use based on the standard Docker
    # client logic.
    try:
        creds = docker_creds.DefaultKeychain.Resolve(name)
    # pylint: disable=broad-except
    except Exception as e:
        logging.fatal('Error resolving credentials for %s: %s', name, e)
        sys.exit(1)

    try:
        with tarfile.open(name=args.tarball, mode='w:') as tar:
            logging.info('Pulling manifest list from %r ...', name)
            with image_list.FromRegistry(name, creds, transport) as img_list:
                if img_list.exists():
                    platform = image_list.Platform({
                        'architecture': _PROCESSOR_ARCHITECTURE,
                        'os': _OPERATING_SYSTEM,
                    })
                    # pytype: disable=wrong-arg-types
                    with img_list.resolve(platform) as default_child:
                        save.tarball(_make_tag_if_digest(name), default_child,
                                     tar)
                        return
                    # pytype: enable=wrong-arg-types

            logging.info('Pulling v2.2 image from %r ...', name)
            with v2_2_image.FromRegistry(name, creds, transport,
                                         accept) as v2_2_img:
                if v2_2_img.exists():
                    save.tarball(_make_tag_if_digest(name), v2_2_img, tar)
                    return

            logging.info('Pulling v2 image from %r ...', name)
            with v2_image.FromRegistry(name, creds, transport) as v2_img:
                with v2_compat.V22FromV2(v2_img) as v2_2_img:
                    save.tarball(_make_tag_if_digest(name), v2_2_img, tar)
                    return
    # pylint: disable=broad-except
    except Exception as e:
        logging.fatal('Error pulling and saving image %s: %s', name, e)
        sys.exit(1)
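
One small detail in the final step: mode='w:' tells tarfile to write an uncompressed archive (the trailing ':' selects "no compression"; 'w:gz' would gzip it). A tiny standalone sketch of the same tarfile usage with an in-memory buffer and an illustrative member name:

import io
import tarfile

buf = io.BytesIO()
with tarfile.open(fileobj=buf, mode='w:') as tar:  # uncompressed tar stream
    data = b'{"architecture": "amd64", "os": "linux"}'
    info = tarfile.TarInfo(name='config.json')
    info.size = len(data)
    tar.addfile(info, io.BytesIO(data))

print('tar size:', len(buf.getvalue()))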
Exemplo n.º 48
0
    type=int,
    default=20,
    help='percent [0-100] of images to use for testing [default: 20]')

args = parser.parse_args()

## Build train/validate/test sets

random_state = np.random.RandomState(args.seed)

## Check inputs

train_percent = 100 - args.validate_percent - args.test_percent

if train_percent <= 0:
    logging.fatal("no data for training! check validate, test percentages")
    sys.exit(1)
elif train_percent < 50:
    logging.warning(
        "only {:.1f}% of data available for training".format(train_percent))
else:
    logging.info("targeting train/validate/test percent = {}/{}/{}".format(
        train_percent, args.validate_percent, args.test_percent))

## Read through all records and split them up

test_threshold = args.test_percent / 100.0
validate_threshold = test_threshold + args.validate_percent / 100.0

all_records = json.load(args.all_json)
train, validate, test = [], [], []
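
The two thresholds carve the unit interval into test / validate / train bands, so each record can be routed with a single draw from the seeded RandomState. A minimal sketch of the assignment loop that would follow (the records here are placeholder dicts; the original loop over all_records lies outside this excerpt):

import numpy as np

test_percent, validate_percent, seed = 20, 10, 0
random_state = np.random.RandomState(seed)

test_threshold = test_percent / 100.0
validate_threshold = test_threshold + validate_percent / 100.0

train, validate, test = [], [], []
for record in ({'id': i} for i in range(1000)):
    draw = random_state.uniform()
    if draw < test_threshold:
        test.append(record)
    elif draw < validate_threshold:
        validate.append(record)
    else:
        train.append(record)

print(len(train), len(validate), len(test))  # roughly 700 / 100 / 200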
Exemplo n.º 49
0
    def __addEpisodesToTraktCollection(self, kodiShows, traktShows):
        if utilities.getSettingAsBool(
                'add_episodes_to_trakt') and not self.__isCanceled():
            addTraktShows = copy.deepcopy(traktShows)
            addKodiShows = copy.deepcopy(kodiShows)

            tmpTraktShowsAdd = self.__compareShows(addKodiShows, addTraktShows)
            traktShowsAdd = copy.deepcopy(tmpTraktShowsAdd)
            self.sanitizeShows(traktShowsAdd)
            #logger.debug("traktShowsAdd %s" % traktShowsAdd)

            if len(traktShowsAdd['shows']) == 0:
                self.__updateProgress(48,
                                      line1=utilities.getString(32068),
                                      line2=utilities.getString(32104))
                logger.debug(
                    "[Episodes Sync] trakt.tv episode collection is up to date."
                )
                return
            logger.debug(
                "[Episodes Sync] %i show(s) have episodes (%d) to be added to your trakt.tv collection."
                % (len(traktShowsAdd['shows']),
                   self.__countEpisodes(traktShowsAdd)))
            for show in traktShowsAdd['shows']:
                logger.debug("[Episodes Sync] Episodes added: %s" %
                             self.__getShowAsString(show, short=True))

            self.__updateProgress(33,
                                  line1=utilities.getString(32068),
                                  line2=utilities.getString(32067) %
                                  (len(traktShowsAdd['shows'])),
                                  line3=" ")

            # split the show list into chunks of `chunksize` shows per request
            chunksize = 1
            chunked_episodes = utilities.chunks(traktShowsAdd['shows'],
                                                chunksize)
            errorcount = 0
            i = 0
            x = float(len(traktShowsAdd['shows']))
            for chunk in chunked_episodes:
                if self.__isCanceled():
                    return
                i += 1
                y = ((i / x) * 16) + 33
                self.__updateProgress(int(y),
                                      line2=utilities.getString(32069) %
                                      ((i) * chunksize if
                                       (i) * chunksize < x else x, x))

                request = {'shows': chunk}
                logger.debug("[traktAddEpisodes] Shows to add %s" % request)
                try:
                    self.traktapi.addToCollection(request)
                except Exception as ex:
                    message = utilities.createError(ex)
                    logging.fatal(message)
                    errorcount += 1

            logger.debug("[traktAddEpisodes] Finished with %d error(s)" %
                         errorcount)
            self.__updateProgress(49,
                                  line2=utilities.getString(32105) %
                                  self.__countEpisodes(traktShowsAdd))
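
The batching step leans on utilities.chunks to slice the show list into fixed-size requests before each addToCollection call. A minimal sketch of what such a chunking helper typically looks like (an assumption; the add-on's own implementation may differ):

def chunks(lst, n):
    """Yield successive n-sized slices of lst."""
    for i in range(0, len(lst), n):
        yield lst[i:i + n]

shows = [{'title': 'Show %d' % i} for i in range(5)]
for chunk in chunks(shows, 2):
    request = {'shows': chunk}
    print(request)  # batches of 2, 2 and 1 shows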
Exemplo n.º 50
0
def build_mask(masks,
               corner,
               subvol_size,
               mask_volume_map=None,
               image=None,
               alignment=None):
    """Builds a boolean mask.

  Args:
    masks: iterable of MaskConfig protos
    corner: lower corner of the subvolume for which to build the
        mask, as a (z, y, x) tuple
    subvol_size: size of the subvolume for which to build the mask,
        as a (z, y, x) tuple
    mask_volume_map: optional dict mapping volume proto hashes to open
        volumes; use this as a cache to avoid opening volumes
        multiple times.
    image: 3d image ndarray; only needed if the mask config uses
        the image as input
    alignment: optional Alignment object

  Returns:
    boolean mask built according to the specified config
  """
    final_mask = None
    if mask_volume_map is None:
        mask_volume_map = {}

    if alignment is None:
        alignment = align.Alignment(corner, subvol_size)  # identity

    src_corner, src_size = alignment.expand_bounds(corner,
                                                   subvol_size,
                                                   forward=False)
    for config in masks:
        curr_mask = np.zeros(subvol_size, dtype=bool)

        source_type = config.WhichOneof('source')
        if source_type == 'coordinate_expression':
            # pylint:disable=eval-used,unused-variable
            z, y, x = np.mgrid[src_corner[0]:src_corner[0] + src_size[0],
                               src_corner[1]:src_corner[1] + src_size[1],
                               src_corner[2]:src_corner[2] + src_size[2]]
            bool_mask = eval(config.coordinate_expression.expression)
            # pylint:enable=eval-used,unused-variable
            curr_mask |= alignment.align_and_crop(src_corner, bool_mask,
                                                  corner, subvol_size)
        else:
            if source_type == 'image':
                channels = config.image.channels
                mask = image[np.newaxis, ...]
            elif source_type == 'volume':
                channels = config.volume.channels

                volume_key = config.volume.mask.SerializeToString()
                if volume_key not in mask_volume_map:
                    mask_volume_map[volume_key] = decorated_volume(
                        config.volume.mask)
                volume = mask_volume_map[volume_key]

                clipped_corner, clipped_size = clip_subvolume_to_bounds(
                    src_corner, src_size, volume)
                clipped_end = clipped_corner + clipped_size
                mask = volume[:,  #
                              clipped_corner[0]:clipped_end[0],  #
                              clipped_corner[1]:clipped_end[1],  #
                              clipped_corner[2]:clipped_end[2]]
            else:
                logging.fatal('Unsupported mask source: %s', source_type)

            for chan_config in channels:
                channel_mask = mask[chan_config.channel, ...]
                channel_mask = alignment.align_and_crop(
                    src_corner, channel_mask, corner, subvol_size)

                if chan_config.values:
                    bool_mask = np.in1d(channel_mask,
                                        chan_config.values).reshape(
                                            channel_mask.shape)
                else:
                    bool_mask = ((channel_mask >= chan_config.min_value) &
                                 (channel_mask <= chan_config.max_value))
                if chan_config.invert:
                    bool_mask = np.logical_not(bool_mask)

                curr_mask |= bool_mask

        if config.invert:
            curr_mask = np.logical_not(curr_mask)

        if final_mask is None:
            final_mask = curr_mask
        else:
            final_mask |= curr_mask

    return final_mask
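
For the coordinate_expression source, np.mgrid gives every voxel its global (z, y, x) coordinate and the configured expression is evaluated against those arrays to yield a boolean volume. A standalone sketch with a hard-coded expression (the MaskConfig proto plumbing and the alignment step are omitted):

import numpy as np

corner = (10, 0, 0)       # (z, y, x) lower corner of the subvolume
subvol_size = (4, 5, 6)

z, y, x = np.mgrid[corner[0]:corner[0] + subvol_size[0],
                   corner[1]:corner[1] + subvol_size[1],
                   corner[2]:corner[2] + subvol_size[2]]

expression = '(z >= 12) & (x < 3)'  # stands in for config.coordinate_expression.expression
bool_mask = eval(expression)        # pylint: disable=eval-used

print(bool_mask.shape, int(bool_mask.sum()))  # (4, 5, 6) with 2 * 5 * 3 = 30 True voxels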
Exemplo n.º 51
0
def infra_error(message):
    logging.fatal(message)
    sys.exit(constants.INFRA_EXIT_CODE)
Exemplo n.º 52
0
    def __addEpisodesToTraktWatched(self, kodiShows, traktShows):
        if utilities.getSettingAsBool(
                'trakt_episode_playcount') and not self.__isCanceled():
            updateTraktTraktShows = copy.deepcopy(traktShows)
            updateTraktKodiShows = copy.deepcopy(kodiShows)

            traktShowsUpdate = self.__compareShows(updateTraktKodiShows,
                                                   updateTraktTraktShows,
                                                   watched=True)
            self.sanitizeShows(traktShowsUpdate)
            #logger.debug("traktShowsUpdate %s" % traktShowsUpdate)

            if len(traktShowsUpdate['shows']) == 0:
                self.__updateProgress(82,
                                      line1=utilities.getString(32071),
                                      line2=utilities.getString(32106))
                logger.debug(
                    "[Episodes Sync] trakt.tv episode playcounts are up to date."
                )
                return

            logger.debug(
                "[Episodes Sync] %i show(s) are missing playcounts on trakt.tv"
                % len(traktShowsUpdate['shows']))
            for show in traktShowsUpdate['shows']:
                logger.debug("[Episodes Sync] Episodes updated: %s" %
                             self.__getShowAsString(show, short=True))

            self.__updateProgress(66,
                                  line1=utilities.getString(32071),
                                  line2=utilities.getString(32070) %
                                  (len(traktShowsUpdate['shows'])),
                                  line3="")
            errorcount = 0
            i = 0
            x = float(len(traktShowsUpdate['shows']))
            for show in traktShowsUpdate['shows']:
                if self.__isCanceled():
                    return
                epCount = self.__countEpisodes([show])
                title = show['title'].encode('utf-8', 'ignore')
                i += 1
                y = ((i / x) * 16) + 66
                self.__updateProgress(int(y),
                                      line2=title,
                                      line3=utilities.getString(32073) %
                                      epCount)

                s = {'shows': [show]}
                logger.debug("[traktUpdateEpisodes] Shows to update %s" % s)
                try:
                    self.traktapi.addToHistory(s)
                except Exception as ex:
                    message = utilities.createError(ex)
                    logging.fatal(message)
                    errorcount += 1

            logger.debug("[traktUpdateEpisodes] Finished with %d error(s)" %
                         errorcount)
            self.__updateProgress(82,
                                  line2=utilities.getString(32072) %
                                  (len(traktShowsUpdate['shows'])),
                                  line3="")
Exemplo n.º 53
0
def RunBenchmark(benchmark, finder_options):
  """Run this test with the given options.

  Returns:
    0 on success, 1 if there is a failure, or 2 if there is an uncaught exception.
  """
  start = time.time()
  benchmark.CustomizeBrowserOptions(finder_options.browser_options)

  benchmark_metadata = benchmark.GetMetadata()
  possible_browser = browser_finder.FindBrowser(finder_options)
  expectations = benchmark.expectations

  target_platform = None
  if possible_browser:
    target_platform = possible_browser.platform
  else:
    target_platform = platform_module.GetHostPlatform()

  can_run_on_platform = benchmark._CanRunOnPlatform(
      target_platform, finder_options)

  expectations_disabled = False
  # For now, test expectations are only applicable in the cases where the
  # testing target involves a browser.
  if possible_browser:
    expectations_disabled = expectations.IsBenchmarkDisabled(
        possible_browser.platform, finder_options)

  if expectations_disabled or not can_run_on_platform:
    print('%s is disabled on the selected browser' % benchmark.Name())
    if finder_options.run_disabled_tests and can_run_on_platform:
      print('Running benchmark anyway due to: --also-run-disabled-tests')
    else:
      if can_run_on_platform:
        print('Try --also-run-disabled-tests to force the benchmark to run.')
      else:
        print ("This platform is not supported for this benchmark. If this is "
               "in error please add it to the benchmark's supported platforms.")
      # If chartjson is specified, this will print a dict indicating the
      # benchmark name and disabled state.
      with results_options.CreateResults(
          benchmark_metadata, finder_options,
          should_add_value=benchmark.ShouldAddValue,
          benchmark_enabled=False
          ) as results:
        results.PrintSummary()
      # When a disabled benchmark is run we now want to return success since
      # we are no longer filtering these out in the buildbot recipes.
      return 0

  pt = benchmark.CreatePageTest(finder_options)
  pt.__name__ = benchmark.__class__.__name__

  stories = benchmark.CreateStorySet(finder_options)

  if isinstance(pt, legacy_page_test.LegacyPageTest):
    if any(not isinstance(p, page.Page) for p in stories.stories):
      raise Exception(
          'PageTest must be used with StorySet containing only '
          'telemetry.page.Page stories.')

  with results_options.CreateResults(
      benchmark_metadata, finder_options,
      should_add_value=benchmark.ShouldAddValue,
      benchmark_enabled=True) as results:
    try:
      Run(pt, stories, finder_options, results, benchmark.max_failures,
          expectations=expectations, metadata=benchmark.GetMetadata(),
          max_num_values=benchmark.MAX_NUM_VALUES)
      return_code = 1 if results.had_failures else 0
      # We want to make sure that all expectations are linked to real stories,
      # this will log error messages if names do not match what is in the set.
      benchmark.GetBrokenExpectations(stories)
    except Exception: # pylint: disable=broad-except
      logging.fatal(
          'Benchmark execution interrupted by a fatal exception.')
      results.telemetry_info.InterruptBenchmark()
      exception_formatter.PrintFormattedException()
      return_code = 2

    benchmark_owners = benchmark.GetOwners()
    benchmark_component = benchmark.GetBugComponents()

    if benchmark_owners:
      results.AddSharedDiagnostic(
          reserved_infos.OWNERS.name, benchmark_owners)

    if benchmark_component:
      results.AddSharedDiagnostic(
          reserved_infos.BUG_COMPONENTS.name, benchmark_component)

    try:
      if finder_options.upload_results:
        results.UploadTraceFilesToCloud()
        results.UploadArtifactsToCloud()
    finally:
      duration = time.time() - start
      results.AddSummaryValue(scalar.ScalarValue(
          None, 'benchmark_duration', 'minutes', duration / 60.0))
      results.AddDurationHistogram(duration * 1000.0)
      memory_debug.LogHostMemoryUsage()
      results.PrintSummary()
  return return_code
Exemplo n.º 54
0
Arquivo: run.py Projeto: q587p/bnw
if __name__=="__main__":
    import logging
    import os
    fr = os.fork()
    if fr < 0:
        logging.fatal('Unable to fork')
    elif fr == 0:
        from coverage import coverage
        #coverage.process_startup()
        cov = coverage(config_file=False, auto_data=True)
        cov.start()
        import atexit
        atexit.register(lambda: cov.stop())

        import sys
        from os import path
        sys.path.insert(0, path.dirname(__file__))
        while sys.argv:
            sys.argv.pop()
        sys.argv.append('bnw')
        sys.argv.append('-n')
        sys.argv.append('-l')
        sys.argv.append('test_server.log')
#        sys.argv.append('--pidfile=testinst.pid')
        from bnw.scripts.entry import instance
        instance()
    else:
        import sys
        from twisted.python import log
        log.startLogging(sys.stdout)
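
The fork-and-branch structure is the interesting part here; note that in CPython os.fork() raises OSError on failure rather than returning a negative pid, so a try/except guard is the more reliable place for the fatal log. A minimal, POSIX-only sketch of the same pattern (the child body is a placeholder for starting the instrumented instance):

import logging
import os
import sys

logging.basicConfig(level=logging.INFO)

try:
    fr = os.fork()
except OSError as err:
    logging.fatal('Unable to fork: %s', err)
    sys.exit(1)

if fr == 0:
    # child: this is where the coverage-instrumented instance would start
    logging.info('child pid %d running', os.getpid())
    sys.exit(0)

# parent: wait for the child and report its exit code
_, status = os.waitpid(fr, 0)
logging.info('child exited with code %d', os.WEXITSTATUS(status))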
Exemplo n.º 55
0
def create_decoder():
    """Creates the ``Decoder`` instance. This specifies the search 
    strategy used to traverse the space spanned by the predictors. This
    method relies on the global ``args`` variable.
    
    TODO: Refactor to avoid long argument lists
    
    Returns:
        Decoder. Instance of the search strategy
    """
    # Create decoder instance and add predictors
    decoder = None
    try:
        if args.decoder == "greedy":
            decoder = GreedyDecoder(args)
        elif args.decoder == "beam":
            decoder = BeamDecoder(args)
        elif args.decoder == "multisegbeam":
            decoder = MultisegBeamDecoder(args,
                                          args.hypo_recombination,
                                          args.beam,
                                          args.multiseg_tokenizations,
                                          args.early_stopping,
                                          args.max_word_len)
        elif args.decoder == "syncbeam":
            decoder = SyncBeamDecoder(args)
        elif args.decoder == "fstbeam":
            decoder = FSTBeamDecoder(args)
        elif args.decoder == "predlimitbeam":
            decoder = PredLimitBeamDecoder(args)
        elif args.decoder == "mbrbeam":
            decoder = MBRBeamDecoder(args)
        elif args.decoder == "lenbeam":
            decoder = LengthBeamDecoder(args)
        elif args.decoder == "sepbeam":
            decoder = SepBeamDecoder(args)
        elif args.decoder == "syntaxbeam":
            decoder = SyntaxBeamDecoder(args)
        elif args.decoder == "combibeam":
            decoder = CombiBeamDecoder(args)
        elif args.decoder == "dfs":
            decoder = DFSDecoder(args)
        elif args.decoder == "simpledfs":
            decoder = SimpleDFSDecoder(args)
        elif args.decoder == "simplelendfs":
            decoder = SimpleLengthDFSDecoder(args)
        elif args.decoder == "restarting":
            decoder = RestartingDecoder(args,
                                        args.hypo_recombination,
                                        args.max_node_expansions,
                                        args.low_decoder_memory,
                                        args.restarting_node_score,
                                        args.stochastic_decoder,
                                        args.decode_always_single_step)
        elif args.decoder == "flip":
            decoder = FlipDecoder(args)
        elif args.decoder == "bigramgreedy":
            decoder = BigramGreedyDecoder(args)
        elif args.decoder == "bucket":
            decoder = BucketDecoder(args,
                                    args.hypo_recombination,
                                    args.max_node_expansions,
                                    args.low_decoder_memory,
                                    args.beam,
                                    args.pure_heuristic_scores,
                                    args.decoder_diversity_factor,
                                    args.early_stopping,
                                    args.stochastic_decoder,
                                    args.bucket_selector,
                                    args.bucket_score_strategy,
                                    args.collect_statistics)
        elif args.decoder == "astar":
            decoder = AstarDecoder(args)
        elif args.decoder == "dijkstra":
            decoder = DijkstraDecoder(args)
        elif args.decoder == "dijkstra_ts":
            decoder = DijkstraTSDecoder(args)
        else:
            logging.fatal("Decoder %s not available. Please double-check the "
                          "--decoder parameter." % args.decoder)
    except Exception as e:
        logging.fatal("An %s has occurred while initializing the decoder: %s"
                      " Stack trace: %s" % (sys.exc_info()[0],
                                            e,
                                            traceback.format_exc()))
    if decoder is None:
        sys.exit("Could not initialize decoder.")
    add_predictors(decoder)
    # Add heuristics for search strategies like A*
    if args.heuristics:
        add_heuristics(decoder)
    return decoder
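
The docstring's refactoring TODO points at the obvious fix for the long if/elif chain: a name-to-constructor registry. A minimal sketch of that idea with placeholder decoder classes (the real SGNMT decoders take many more constructor arguments):

import logging
import sys

class GreedyDecoder:            # placeholder stand-ins for the real classes
    def __init__(self, args):
        self.args = args

class BeamDecoder:
    def __init__(self, args):
        self.args = args

DECODER_REGISTRY = {
    'greedy': GreedyDecoder,
    'beam': BeamDecoder,
}

def create_decoder_from_registry(args):
    factory = DECODER_REGISTRY.get(args['decoder'])
    if factory is None:
        logging.fatal("Decoder %s not available. Please double-check the "
                      "--decoder parameter." % args['decoder'])
        sys.exit("Could not initialize decoder.")
    return factory(args)

print(type(create_decoder_from_registry({'decoder': 'beam'})).__name__)  # BeamDecoder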
Exemplo n.º 56
0
            df_2 = parsingDetail(df_1)
            header.outputCsv(df_2, "第二層結果")
            header.RESULT_COUNT = len(df_2)
        header.zipFile()
        header.createInfoFile()
        header.createOKFile()
        header.outputLastResult(df_1, header.lastResult,
                                checkRange)  # [2019.02.11] added the outputLastResult step
    except:
        logging.error("Crawl job execution failed")
        traceback.print_exc()
        header.createInfoFile()

    header.processEnd()


# In[57]:

print(header.TIMELABEL)
logging.fatal("FINAL_PATH:" + header.FINAL_PATH)
url = "https://www.ib.gov.tw/ch/home.jsp?id=38&parentpath=0,3"

# In[58]:

main(url)

# In[ ]:

# In[ ]:
Exemplo n.º 57
0
    if ARGS['make_config'] is True:
        make_config()
        sys.exit(0)
    PARSER = SafeConfigParser()
    PARSER.read(ARGS['config'])
    logging.basicConfig(level=int(ARGS['log_level']),
                        format=PARSER.get('logging', 'log_format'),
                        datefmt=PARSER.get('logging', 'log_datefmt'))

    # We only instantiate our AMQP class *once*. After successfully connecting
    # and declaring our queue we then handle further connection issues within
    # the class itself. However, the queue declaration does need to succeed.
    try:
        QUEUE = RabbitMQ(PARSER, queue_name=ARGS['queue'])
    except ConnectionError:
        logging.fatal('Unable to create AMQP object')
        logging.fatal(traceback.format_exc())
        sys.exit(1)
    except AttributeError:
        # Can happen if we cannot declare our queue
        logging.fatal('Unable to declare our queue')
        logging.fatal(traceback.format_exc())
        sys.exit(1)
    except TimeoutError:
        logging.fatal('Timeout attempting to connect')
        logging.fatal(traceback.format_exc())
        sys.exit(1)

    # Our main loop that consumes emitted logs and pushes them into our queue
    while True:
        try:
Exemplo n.º 58
0
def do_decode(decoder, 
              output_handlers, 
              src_sentences):
    """This method contains the main decoding loop. It iterates through
    ``src_sentences`` and applies ``decoder.decode()`` to each of them.
    At the end, it calls the output handlers to create output files.
    
    Args:
        decoder (Decoder):  Current decoder instance
        output_handlers (list):  List of output handlers, see
                                 ``create_output_handlers()``
        src_sentences (list):  A list of strings. The strings are the
                               source sentences with word indices to 
                               translate (e.g. '1 123 432 2')
    """
    if not decoder.has_predictors():
        logging.fatal("Terminated due to an error in the "
                      "predictor configuration.")
        return
    all_hypos = []
    text_output_handler = _get_text_output_handler(output_handlers)
    if text_output_handler:
        text_output_handler.open_file()
    start_time = time.time()
    logging.info("Start time: %s" % start_time)
    sen_indices = []
    counts = []
    for sen_idx in get_sentence_indices(args.range, src_sentences):
        decoder.set_current_sen_id(sen_idx)
        try:
            src = "0" if src_sentences is False else src_sentences[sen_idx]
            if len(src.split()) > 1000:
                print("Skipping ID", str(sen_idx), ". Too long...")
                continue
            src_print = io.src_sentence(src)
            logging.info("Next sentence (ID: %d): %s" % (sen_idx + 1, src_print))
            src = _apply_per_sentence_predictor_weights(src, decoder)
            src = io.encode(src)
            start_hypo_time = time.time()
            decoder.apply_predictors_count = 0
            hypos, count = decoder.decode(src)
            counts.append(count)
            
            if not hypos:
                logging.error("No translation found for ID %d!" % (sen_idx+1))
                logging.info("Stats (ID: %d): score=<not-found> "
                         "num_expansions=%d "
                         "time=%.2f" % (sen_idx+1,
                                        decoder.apply_predictors_count,
                                        time.time() - start_hypo_time))
                hypos = [_generate_dummy_hypo(decoder.predictors)]
            
            hypos = _postprocess_complete_hypos(hypos)
            logging.info("Decoded (ID: %d): %s" % (
                    sen_idx+1,
                    io.decode(hypos[0].trgt_sentence)))
            logging.info("Stats (ID: %d): score=%f "
                         "num_expansions=%d "
                         "time=%.2f" % (sen_idx+1,
                                        hypos[0].total_score,
                                        decoder.apply_predictors_count,
                                        time.time() - start_hypo_time))
            all_hypos.append(hypos)
            sen_indices.append(sen_idx)
            try:
                # Write text output as we go
                if text_output_handler:
                    text_output_handler.write_hypos([hypos])
            except IOError as e:
                logging.error("I/O error %d occurred when creating output files: %s"
                            % (sys.exc_info()[0], e))
        except ValueError as e:
            logging.error("Number format error at sentence id %d: %s, "
                          "Stack trace: %s" % (sen_idx+1, 
                                               e,
                                               traceback.format_exc()))
        except AttributeError as e:
            logging.fatal("Attribute error at sentence id %d: %s. This often "
                          "indicates an error in the predictor configuration "
                          "which could not be detected in initialisation. "
                          "Stack trace: %s" 
                          % (sen_idx+1, e, traceback.format_exc()))
        except Exception as e:
            logging.error("An unexpected %s error has occurred at sentence id "
                          "%d: %s, Stack trace: %s" % (sys.exc_info()[0],
                                                       sen_idx+1,
                                                       e,
                                                       traceback.format_exc()))
    print(sum(counts))
    logging.info("Decoding finished. Time: %.2f" % (time.time() - start_time))
    try:
        for output_handler in output_handlers:
            if output_handler == text_output_handler:
                output_handler.close_file()
            else:
                output_handler.write_hypos(all_hypos, sen_indices)
    except IOError as e:
        logging.error("I/O error %s occurred when creating output files: %s"
                      % (sys.exc_info()[0], e))
Exemplo n.º 59
0
def procedure_main(mode, db_info, config):
    """
    This is the real entry point for the tuning programs.

    :param mode: Three modes: tune, train and recommend.
    :param db_info: Dict describing the database to connect to, passed via the
                    command line or the configuration file.
    :param config: Information read from xtuner.conf.
    :return: Exit status code.
    """
    # Set the minimum permission on the output files.
    os.umask(0o0077)
    # Initialize logger.
    set_logger(config['logfile'])
    logging.info('Starting... (mode: %s)', mode)
    db_agent = new_db_agent(db_info)

    # Clarify the scenario:
    if config['scenario'] in WORKLOAD_TYPE.TYPES:
        db_agent.metric.set_scenario(config['scenario'])
    else:
        config['scenario'] = db_agent.metric.workload_type
    # Clarify tune strategy:
    if config['tune_strategy'] == 'auto':
        # If more iterations are allowed, reinforcement learning is preferred.
        if config['rl_steps'] * config['max_episode_steps'] > 1500:
            config['tune_strategy'] = 'rl'
        else:
            config['tune_strategy'] = 'gop'

    logging.info("Configurations: %s.", config)
    if config['tuning_list'].strip() != '' and mode != 'recommend':
        knobs = load_knobs_from_json_file(config['tuning_list'])
    else:
        print("Start to recommend knobs. Just a moment, please.")
        knobs = recommend_knobs(mode, db_agent.metric)
    if not knobs:
        logging.fatal(
            'No recommended best_knobs for the database. Stop the execution.')
        return -1

    # If the recommend mode is not used,
    # the benchmark running and best_knobs tuning process need to be iterated.
    if mode != 'recommend':
        prompt_restart_risks()  # Users need to be informed of risks.

        recorder = Recorder(config['recorder_file'])
        bm = benchmark.get_benchmark_instance(config['benchmark_script'],
                                              config['benchmark_path'],
                                              config['benchmark_cmd'], db_info)
        env = DB_Env(db_agent,
                     benchmark=bm,
                     recorder=recorder,
                     drop_cache=config['drop_cache'],
                     mem_penalty=config['used_mem_penalty_term'])
        env.set_tuning_knobs(knobs)

        if mode == 'train':
            rl_model('train', env, config)
        elif mode == 'tune':
            if config['tune_strategy'] == 'rl':
                rl_model('tune', env, config)
            elif config['tune_strategy'] == 'gop':
                global_search(env, config)
            else:
                raise ValueError('Incorrect tune strategy: %s.' %
                                 config['tune_strategy'])

            recorder.give_best(knobs)
        else:
            raise ValueError('Incorrect mode value: %s.' % mode)

    # After the above process is executed, the tuned best_knobs are output.
    knobs.output_formatted_knobs()
    if config['output_tuning_result'] != '':
        with open(config['output_tuning_result'], 'w+') as fp:
            # In reinforcement learning training mode,
            # only the training knob list is dumped, but the recommended knob result is not dumped.
            # This is because, in tune mode of reinforcement learning,
            # users can directly load the dumped file as the knob tuning list.
            knobs.dump(fp, dump_report_knobs=mode != 'train')
    logging.info(
        'X-Tuner is executed and ready to exit. '
        'Please refer to the log for details of the execution process.')
    return 0
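
The os.umask(0o0077) call at the top is what the "minimum permission" comment refers to: every file the tuner creates afterwards has its group/other permission bits stripped. A tiny standalone sketch of the effect (POSIX semantics; the file name is illustrative):

import os
import stat

os.umask(0o077)  # new files become readable/writable by the owner only

with open('demo_output.log', 'w') as fp:
    fp.write('log line\n')

mode = stat.S_IMODE(os.stat('demo_output.log').st_mode)
print(oct(mode))  # 0o600 on POSIX: default 0o666 minus the masked bits
os.remove('demo_output.log')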
Exemplo n.º 60
0
def add_predictors(decoder):
    """Adds all enabled predictors to the ``decoder``. This function 
    makes heavy use of the global ``args`` which contains the
    SGNMT configuration. Particularly, it reads out ``args.predictors``
    and adds appropriate instances to ``decoder``.
    TODO: Refactor this method as it is waaaay tooooo looong
    
    Args:
        decoder (Decoder):  Decoding strategy, see ``create_decoder()``.
            This method will add predictors to this instance with
            ``add_predictor()``
    """
    preds = utils.split_comma(args.predictors)
    if not preds:
        logging.fatal("Require at least one predictor! See the --predictors "
                      "argument for more information.")
    weights = None
    if args.predictor_weights:
        weights = utils.split_comma(args.predictor_weights)
        if len(preds) != len(weights):
            logging.fatal("Specified %d predictors, but %d weights. Please "
                      "revise the --predictors and --predictor_weights "
                      "arguments" % (len(preds), len(weights)))
            return
    
    pred_weight = 1.0
    try:
        for idx, pred in enumerate(preds): # Add predictors one by one
            wrappers = []
            if '_' in pred: 
                # Handle weights when we have wrapper predictors
                wrappers = pred.split('_')
                pred = wrappers[-1]
                wrappers = wrappers[-2::-1]
                if weights:
                    wrapper_weights = [float(w) for w in weights[idx].split('_')]
                    pred_weight = wrapper_weights[-1]
                    wrapper_weights = wrapper_weights[-2::-1]
                else:
                    wrapper_weights = [1.0] * len(wrappers)
            elif weights:
                pred_weight = float(weights[idx])

            # Create predictor instances for the string argument ``pred``
            if pred == "nizza":
                p = NizzaPredictor(_get_override_args("pred_src_vocab_size"),
                                   _get_override_args("pred_trg_vocab_size"),
                                   _get_override_args("nizza_model"),
                                   _get_override_args("nizza_hparams_set"),
                                   _get_override_args("nizza_checkpoint_dir"),
                                   n_cpu_threads=args.n_cpu_threads)
            elif pred == "lexnizza":
                p = LexNizzaPredictor(_get_override_args("pred_src_vocab_size"),
                                      _get_override_args("pred_trg_vocab_size"),
                                      _get_override_args("nizza_model"),
                                      _get_override_args("nizza_hparams_set"),
                                      _get_override_args("nizza_checkpoint_dir"),
                                      n_cpu_threads=args.n_cpu_threads,
                                      alpha=args.lexnizza_alpha,
                                      beta=args.lexnizza_beta,
                                      trg2src_model_name=
                                          args.lexnizza_trg2src_model, 
                                      trg2src_hparams_set_name=
                                          args.lexnizza_trg2src_hparams_set,
                                      trg2src_checkpoint_dir=
                                          args.lexnizza_trg2src_checkpoint_dir,
                                      shortlist_strategies=
                                          args.lexnizza_shortlist_strategies,
                                      max_shortlist_length=
                                          args.lexnizza_max_shortlist_length,
                                      min_id=args.lexnizza_min_id)
            elif pred == "t2t":
                p = T2TPredictor(_get_override_args("pred_src_vocab_size"),
                                 _get_override_args("pred_trg_vocab_size"),
                                 _get_override_args("t2t_model"),
                                 _get_override_args("t2t_problem"),
                                 _get_override_args("t2t_hparams_set"),
                                 args.t2t_usr_dir,
                                 _get_override_args("t2t_checkpoint_dir"),
                                 t2t_unk_id=_get_override_args("t2t_unk_id"),
                                 n_cpu_threads=args.n_cpu_threads,
                                 max_terminal_id=args.syntax_max_terminal_id,
                                 pop_id=args.syntax_pop_id)
            elif pred == "segt2t":
                p = SegT2TPredictor(_get_override_args("pred_src_vocab_size"),
                                    _get_override_args("pred_trg_vocab_size"),
                                    _get_override_args("t2t_model"),
                                    _get_override_args("t2t_problem"),
                                    _get_override_args("t2t_hparams_set"),
                                    args.t2t_usr_dir,
                                    _get_override_args("t2t_checkpoint_dir"),
                                    t2t_unk_id=_get_override_args("t2t_unk_id"),
                                    n_cpu_threads=args.n_cpu_threads,
                                    max_terminal_id=args.syntax_max_terminal_id,
                                    pop_id=args.syntax_pop_id)
            elif pred == "editt2t":
                p = EditT2TPredictor(_get_override_args("pred_src_vocab_size"),
                                     _get_override_args("pred_trg_vocab_size"),
                                     _get_override_args("t2t_model"),
                                     _get_override_args("t2t_problem"),
                                     _get_override_args("t2t_hparams_set"),
                                     args.trg_test,
                                     args.beam,
                                     args.t2t_usr_dir,
                                     _get_override_args("t2t_checkpoint_dir"),
                                     t2t_unk_id=_get_override_args("t2t_unk_id"),
                                     n_cpu_threads=args.n_cpu_threads,
                                     max_terminal_id=args.syntax_max_terminal_id,
                                     pop_id=args.syntax_pop_id)
            elif pred == "fertt2t":
                p = FertilityT2TPredictor(
                                 _get_override_args("pred_src_vocab_size"),
                                 _get_override_args("pred_trg_vocab_size"),
                                 _get_override_args("t2t_model"),
                                 _get_override_args("t2t_problem"),
                                 _get_override_args("t2t_hparams_set"),
                                 args.t2t_usr_dir,
                                 _get_override_args("t2t_checkpoint_dir"),
                                 n_cpu_threads=args.n_cpu_threads,
                                 max_terminal_id=args.syntax_max_terminal_id,
                                 pop_id=args.syntax_pop_id)
            elif pred == "fairseq":
                p = FairseqPredictor(_get_override_args("fairseq_path"),
                                     args.fairseq_user_dir,
                                     args.fairseq_lang_pair,
                                     args.n_cpu_threads,
                                     args.subtract_uni,
                                     args.subtract_marg,
                                     _get_override_args("marg_path"),
                                     args.lmbda,
                                     args.ppmi,
                                     args.epsilon)
            elif pred == "bracket":
                p = BracketPredictor(args.syntax_max_terminal_id,
                                     args.syntax_pop_id,
                                     max_depth=args.syntax_max_depth,
                                     extlength_path=args.extlength_path)
            elif pred == "osm":
                p = OSMPredictor(args.src_wmap,
                                 args.trg_wmap,
                                 use_jumps=args.osm_use_jumps,
                                 use_auto_pop=args.osm_use_auto_pop,
                                 use_unpop=args.osm_use_unpop,
                                 use_pop2=args.osm_use_pop2,
                                 use_src_eop=args.osm_use_src_eop,
                                 use_copy=args.osm_use_copy)
            elif pred == "forcedosm":
                p = ForcedOSMPredictor(args.src_wmap, 
                                       args.trg_wmap, 
                                       args.trg_test)
            elif pred == "fst":
                p = FstPredictor(_get_override_args("fst_path"),
                                 args.use_fst_weights,
                                 args.normalize_fst_weights,
                                 skip_bos_weight=args.fst_skip_bos_weight,
                                 to_log=args.fst_to_log)
            elif pred == "nfst":
                p = NondeterministicFstPredictor(_get_override_args("fst_path"),
                                                 args.use_fst_weights,
                                                 args.normalize_fst_weights,
                                                 args.fst_skip_bos_weight,
                                                 to_log=args.fst_to_log)
            elif pred == "forced":
                p = ForcedPredictor(
                                args.trg_test, 
                                utils.split_comma(args.forced_spurious, int))
            elif pred == "bow":
                p = BagOfWordsPredictor(
                                args.trg_test,
                                args.bow_accept_subsets,
                                args.bow_accept_duplicates,
                                args.heuristic_scores_file,
                                args.collect_statistics,
                                "consumed" in args.bow_heuristic_strategies,
                                "remaining" in args.bow_heuristic_strategies,
                                args.bow_diversity_heuristic_factor,
                                _get_override_args("pred_trg_vocab_size"))
            elif pred == "bowsearch":
                p = BagOfWordsSearchPredictor(
                                decoder,
                                args.hypo_recombination,
                                args.trg_test,
                                args.bow_accept_subsets,
                                args.bow_accept_duplicates,
                                args.heuristic_scores_file,
                                args.collect_statistics,
                                "consumed" in args.bow_heuristic_strategies,
                                "remaining" in args.bow_heuristic_strategies,
                                args.bow_diversity_heuristic_factor,
                                _get_override_args("pred_trg_vocab_size"))
            elif pred == "forcedlst":
                feat_name = _get_override_args("forcedlst_sparse_feat")
                p = ForcedLstPredictor(args.trg_test,
                                       args.use_nbest_weights,
                                       args.forcedlst_match_unk,
                                       feat_name if feat_name else None)
            elif pred == "rtn":
                p = RtnPredictor(args.rtn_path,
                                 args.use_rtn_weights,
                                 args.normalize_rtn_weights,
                                 to_log=args.fst_to_log,
                                 minimize_rtns=args.minimize_rtns,
                                 rmeps=args.remove_epsilon_in_rtns)
            elif pred == "kenlm":
                p = KenLMPredictor(args.lm_path)
            elif pred == "wc":
                p = WordCountPredictor(args.wc_word,
                                       args.wc_nonterminal_penalty,
                                       args.syntax_nonterminal_ids,
                                       args.syntax_min_terminal_id,
                                       args.syntax_max_terminal_id,
                                       args.negative_wc,
                                       _get_override_args("pred_trg_vocab_size"))
            elif pred == "ngramc":
                p = NgramCountPredictor(_get_override_args("ngramc_path"),
                                        _get_override_args("ngramc_order"),
                                        args.ngramc_discount_factor)
            elif pred == "unkc":
                p = UnkCountPredictor(
                     _get_override_args("pred_src_vocab_size"), 
                     utils.split_comma(args.unk_count_lambdas, float))
            elif pred == "length":
                length_model_weights = utils.split_comma(
                    args.length_model_weights, float)
                p = NBLengthPredictor(args.src_test_raw, 
                                      length_model_weights, 
                                      args.use_length_point_probs,
                                      args.length_model_offset)
            elif pred == "extlength":
                p = ExternalLengthPredictor(args.extlength_path)
            elif pred == "lrhiero":
                fw = None
                if args.grammar_feature_weights:
                    fw = utils.split_comma(args.grammar_feature_weights, float)
                p = RuleXtractPredictor(args.rules_path,
                                        args.use_grammar_weights,
                                        fw)
            else:
                logging.fatal("Predictor '%s' not available. Please check "
                              "--predictors for spelling errors." % pred)
                decoder.remove_predictors()
                return
            for _,wrapper in enumerate(wrappers):
                # Embed predictor ``p`` into wrapper predictors if necessary
                # TODO: Use wrapper_weights
                if wrapper == "idxmap":
                    src_path = _get_override_args("src_idxmap")
                    trg_path = _get_override_args("trg_idxmap")
                    if isinstance(p, UnboundedVocabularyPredictor): 
                        p = UnboundedIdxmapPredictor(src_path, trg_path, p, 1.0) 
                    else: # idxmap predictor for bounded predictors
                        p = IdxmapPredictor(src_path, trg_path, p, 1.0)
                elif wrapper == "maskvocab":
                    if isinstance(p, UnboundedVocabularyPredictor): 
                        p = UnboundedMaskvocabPredictor(args.maskvocab_vocab, p)
                    else: # maskvocab predictor for bounded predictors
                        p = MaskvocabPredictor(args.maskvocab_vocab, p)
                elif wrapper == "weightnt":
                    p = WeightNonTerminalPredictor(
                        p, 
                        args.syntax_nonterminal_factor,
                        args.syntax_nonterminal_ids,
                        args.syntax_min_terminal_id,
                        args.syntax_max_terminal_id,
                        _get_override_args("pred_trg_vocab_size"))
                elif wrapper == "parse":
                    if args.parse_tok_grammar:
                        if args.parse_bpe_path:
                            p = BpeParsePredictor(
                                args.syntax_path,
                                args.syntax_bpe_path,
                                p,
                                args.syntax_word_out,
                                args.normalize_fst_weights,
                                norm_alpha=args.syntax_norm_alpha,
                                beam_size=args.syntax_internal_beam,
                                max_internal_len=args.syntax_max_internal_len,
                                allow_early_eos=args.syntax_allow_early_eos,
                                consume_out_of_class=args.syntax_consume_ooc,
                                terminal_restrict=args.syntax_terminal_restrict,
                                internal_only_restrict=args.syntax_internal_only,
                                eow_ids=args.syntax_eow_ids,
                                terminal_ids=args.syntax_terminal_ids)
                        else:
                            p = TokParsePredictor(
                                args.syntax_path,
                                p,
                                args.syntax_word_out,
                                args.normalize_fst_weights,
                                norm_alpha=args.syntax_norm_alpha,
                                beam_size=args.syntax_internal_beam,
                                max_internal_len=args.syntax_max_internal_len,
                                allow_early_eos=args.syntax_allow_early_eos,
                                consume_out_of_class=args.syntax_consume_ooc)
                    else:
                        p = ParsePredictor(
                            p,
                            args.normalize_fst_weights,
                            beam_size=args.syntax_internal_beam,
                            max_internal_len=args.syntax_max_internal_len,
                            nonterminal_ids=args.syntax_nonterminal_ids)
                elif wrapper == "altsrc":
                    src_test = _get_override_args("altsrc_test")
                    if isinstance(p, UnboundedVocabularyPredictor): 
                        p = UnboundedAltsrcPredictor(src_test, p)
                    else: # altsrc predictor for bounded predictors
                        p = AltsrcPredictor(src_test, p)
                elif wrapper == "rank":
                    if isinstance(p, UnboundedVocabularyPredictor): 
                        p = UnboundedRankPredictor(p)
                    else: # rank predictor for bounded predictors
                        p = RankPredictor(p)
                elif wrapper == "glue":
                    if isinstance(p, UnboundedVocabularyPredictor): 
                        p = UnboundedGluePredictor(args.max_len_factor, p)
                    else: # glue predictor for bounded predictors
                        p = GluePredictor(args.max_len_factor, p)
                elif wrapper == "word2char":
                    map_path = _get_override_args("word2char_map")
                    # word2char always wraps unbounded predictors
                    p = Word2charPredictor(map_path, p)
                elif wrapper == "skipvocab":
                    # skipvocab always wraps unbounded predictors
                    p = SkipvocabPredictor(args.skipvocab_vocab, 
                                           args.skipvocab_stop_size, 
                                           args.beam, 
                                           p)
                elif wrapper == "fsttok":
                    fsttok_path = _get_override_args("fsttok_path")
                    # fsttok always wraps unbounded predictors
                    p = FSTTokPredictor(fsttok_path,
                                        args.fst_unk_id,
                                        args.fsttok_max_pending_score,
                                        p)
                elif wrapper == "ngramize":
                    # ngramize always wraps bounded predictors
                    p = NgramizePredictor(args.min_ngram_order, 
                                          args.max_ngram_order,
                                          args.max_len_factor, p)
                elif wrapper == "unkvocab":
                    # unkvocab always wraps bounded predictors
                    p = UnkvocabPredictor(args.trg_vocab_size, p)
                else:
                    logging.fatal("Predictor wrapper '%s' not available. "
                                  "Please double-check --predictors for "
                                  "spelling errors." % wrapper)
                    decoder.remove_predictors()
                    return
            decoder.add_predictor(pred, p, pred_weight)
            logging.info("Initialized predictor {} (weight: {})".format(
                             pred, pred_weight))
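    # Any failure while constructing the predictors is fatal: log the cause
    # and remove all predictors so the decoder is not left half-configured.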
    except IOError as e:
        logging.fatal("One of the files required for setting up the "
                      "predictors could not be read: %s" % e)
        decoder.remove_predictors()
    except AttributeError as e:
        logging.fatal("Invalid argument for one of the predictors: %s."
                       "Stack trace: %s" % (e, traceback.format_exc()))
        decoder.remove_predictors()
    except NameError as e:
        logging.fatal("Could not find external library: %s. Please make sure "
                      "that your PYTHONPATH and LD_LIBRARY_PATH contains all "
                      "paths required for the predictors. Stack trace: %s" % 
                      (e, traceback.format_exc()))
        decoder.remove_predictors()
    except ValueError as e:
        logging.fatal("A number format error occurred while configuring the "
                      "predictors: %s. Please double-check all integer- or "
                      "float-valued parameters such as --predictor_weights and"
                      " try again. Stack trace: %s" % (e, traceback.format_exc()))
        decoder.remove_predictors()
    except Exception as e:
        logging.fatal("An unexpected %s has occurred while setting up the pre"
                      "dictors: %s Stack trace: %s" % (sys.exc_info()[0],
                                                       e,
                                                       traceback.format_exc()))
        decoder.remove_predictors()