Example #1
 def getPackageInstance(self, category, package):
     """return instance of class Package from package file"""
     fileName = getFilename( category, package )
     pack = None
     mod = None
     if fileName.endswith(".py") and os.path.isfile(fileName):
         if fileName not in self._packageDict:
             utils.debug( "module to import: %s" % fileName, 2 )
             if not os.path.isfile( fileName ):
                 # note: unreachable given the isfile() check above
                 try:
                     mod = builtins.__import__( fileName )
                 except ImportError as e:
                     utils.warning( 'import failed for module %s: %s' % (fileName, str(e)) )
                     mod = None
             else:
                 modulename = os.path.basename( fileName )[:-3].replace('.', '_')
                 loader = importlib.machinery.SourceFileLoader(modulename, fileName)
                 try:
                     mod = loader.load_module()
                 except Exception as e:
                     raise PortageException("Failed to load file %s" % fileName, category, package, e)
             if mod is not None:
                 subpackage, package = getSubPackage( category, package )
                 self._CURRENT_MODULE = ( fileName, category, subpackage, package, mod )
                 pack = mod.Package( )
                 self._packageDict[ fileName ] = pack
             else:
                 raise PortageException("Failed to find package", category, package)
         else:
             pack = self._packageDict[ fileName ]
         return pack
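A porting note for this example: `SourceFileLoader.load_module()` is deprecated in favor of the `importlib.util` spec API (and removed in recent Python versions). A minimal sketch of the modern equivalent; the helper name `load_module_from_file` is hypothetical:

import importlib.util

def load_module_from_file(modulename, fileName):
    # Build a module spec directly from the file path and execute the module.
    spec = importlib.util.spec_from_file_location(modulename, fileName)
    mod = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(mod)
    return mod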
Example #2
def complete():
    """
    Show complete message
    """
    utils.check_or_exit("Have you pushed the version update to master?")

    warning("Release process is now complete.")
Example #3
	def onCheckUpdate(self, data, response):
		# TODO: check the other states; if the response != 200,
		#       block the update and go to the summary page
		#       with a warning
		if response.status != 200:
			self.riepilogo_label.set_text(_("Impossibile scaricare la lista degli aggiornamenti"))

		try:
			new_schema = SchemaUpdateInterface(parseString(data))
			old_schema = SchemaUpdateInterface.getCurrentSchema()
			ret = old_schema.checkDiff(new_schema)

			if ret == 0:
				self.riepilogo_label.set_text(_("Nessun aggiornamento disponibile."))
			if ret == 1:
				# compatible versions, we can update
				self.__checkFileToUpdate(new_schema)
			if ret == 2:
				# TODO: offer a choice...
				# like a clean install
				utils.warning(_("Le versioni sono potenzialmente compatibili\nma _NON_ viene garantito il perfetto aggiornamento"))
			if ret == 3:
				utils.error(_("Versioni incompatibili per procedere con l'aggiornamento"))
		except:
			utils.error(_("Impossibile interpretare il file xml"))
			return
Example #4
def __import__(module):  # pylint: disable=W0622
    utils.debug("module to import: %s" % module, 2)
    if not os.path.isfile(module):
        try:
            return __builtin__.__import__(module)
        except ImportError as e:
            utils.warning("import failed for module %s: %s" % (module, e.message))
            return None
    else:
        sys.path.append(os.path.dirname(module))
        modulename = os.path.basename(module).replace(".py", "")

        suff_index = None
        for suff in imp.get_suffixes():
            if suff[0] == ".py":
                suff_index = suff
                break

        if suff_index is None:
            utils.die("no .py suffix found")

        with open(module) as fileHdl:
            try:
                return imp.load_module(modulename.replace(".", "_"), fileHdl, module, suff_index)
            except ImportError as e:
                utils.warning("import failed for file %s: %s" % (module, e.message))
                return None
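A porting note: the `imp` module used here was deprecated in Python 3.4 and later removed. The suffix scan above collapses to a constant lookup in `importlib`; a sketch:

import importlib.machinery

# importlib.machinery.SOURCE_SUFFIXES replaces scanning imp.get_suffixes()
# for the ".py" entry.
if ".py" not in importlib.machinery.SOURCE_SUFFIXES:
    raise RuntimeError("no .py suffix found")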
Example #5
File: plugin.py Project: vpicavet/Roam
 def missingLayers(self, layers):
     def showError():
         html = ["<h1>Missing Layers</h1>", "<ul>"]
         for layer in layers:
             html.append("<li>{}</li>".format(layer))
         html.append("</ul>")
         
         self.errorreport.updateHTML("".join(html))
     
     message = "Seems like {} didn't load correctly".format(utils._pluralstring('layer', len(layers)))
     
     utils.warning("Missing layers")
     map(utils.warning, layers)
         
     self.widget = self.iface.messageBar().createMessage("Missing Layers",
                                              message, 
                                              QIcon(":/icons/sad"))
     button = QPushButton(self.widget)
     button.setCheckable(True)
     button.setChecked(self.errorreport.isVisible())
     button.setText("Show missing layers")
     button.toggled.connect(showError)
     button.toggled.connect(self.errorreport.setVisible)
     self.widget.destroyed.connect(self.hideReports)
     self.widget.layout().addWidget(button)
     self.iface.messageBar().pushWidget(self.widget, QgsMessageBar.WARNING)
Example #6
def projectXML(wiki, parser, verbose=False):
    """Produce the main project file"""

    if verbose:
        print "Parsing main project information"
        
    mainData = parser.getListAsDict(parser.getSection (wiki, "Main data", 2))

    t = Template("""
  <projectname> ${Projectname} </projectname> 
  <projectshort> ${Acronym}  </projectshort>
  <duration> ${Duration} </duration>
  <call> ${Call}  </call> 
  <instrument> ${Instrument} </instrument> 
  <topics> ${Topics}</topics>
  <coordinatorname>   ${CoordinatorName} </coordinatorname>
  <coordinatoremail>   ${CoordinatorEmail} </coordinatoremail>
  <coordinatorphone>   ${CoordinatorPhone} </coordinatorphone>
#include<partners.xml>
    """)

    try:
        # substitute() (unlike safe_substitute()) raises KeyError for missing
        # entries, which is what this handler relies on
        res = t.substitute (mainData)
    except KeyError as k:
        print "In the main project setup, an entry was missing: ", str(k)
        utils.warning("In the main project setup, an entry was missing: %s" % str(k))
        raise

    return res
Example #7
 def enterSourceDir(self):
     if ( not os.path.exists( self.sourceDir() ) ):
         return False
     utils.warning("entering the source directory!")
     os.chdir( self.sourceDir() )
     if utils.verbose() > 0:
         print("entering: %s" % self.sourceDir())
Example #8
	def _on_add_collection (self, widget):
		name = utils.InputDialog (None, _("Nome per la nuova collezione:")).run ()

		if name is None:
			utils.warning (_("Devi fornire un nome per la nuova collezione"))
			return
		elif impostazioni.get_collection (name) is not None:
			utils.warning (_("Esiste gia' una collezione con lo stesso nome"))
			return
		else:
			keys = ('ph', 'kh', 'gh', 'no2', 'no3', 'con', 'am', 'fe', 'ra', 'fo', 'cal', 'mag', 'den')
			collection = {}

			if self.combo.get_active () == 0:
				# No base template
				for i in keys:
					collection[i] = [None, None, None]
			else:
				base = impostazioni.get_collection (self.combo.get_active_text ())

				if not base:
					for i in keys:
						collection[i] = [None, None, None]
				else:
					collection = copy (base)

			# Add our collection

			self.store.append ([name])
			impostazioni.add_collection (name, collection)
Example #9
File: project.py Project: NathanW2/qmap
 def settings(self):
     settings = os.path.join(self.folder, "settings.config")
     try:
         with open(settings,'r') as f:
             return yaml.load(f)
     except IOError as e:
         utils.warning(e)
         return None
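A note on `yaml.load` here (and in Examples #13 and #22): calling it without an explicit `Loader` is deprecated in PyYAML 5.1+ and can construct arbitrary Python objects from untrusted input. For plain settings files, `yaml.safe_load` is the usual substitute; a sketch under that assumption:

import os
import yaml

def read_settings(folder):
    # safe_load limits parsing to standard YAML tags.
    path = os.path.join(folder, "settings.config")
    try:
        with open(path, 'r') as f:
            return yaml.safe_load(f)
    except IOError:
        return None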
Example #10
def complete():
    """
    Show complete message
    """
    para("Step 5 of 5: Complete release.")
    utils.check_or_exit("Have you pushed the version updates to master?")

    warning("Release process is now complete.")
Example #11
def unset_var(varname):
    if not os.getenv(varname) == None:
        print
        utils.warning(
            "%s found as environment variable. you cannot override emerge"
            " with this - unsetting %s locally" % (varname, varname)
        )
        os.environ[varname] = ""
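Note that assigning an empty string keeps the variable defined (just empty), which is apparently what emerge wants here. If the goal were to actually remove it from the process environment, `os.environ.pop` does that; a sketch (the name `really_unset_var` is hypothetical):

import os

def really_unset_var(varname):
    # pop() removes the entry entirely; os.environ[varname] = "" would
    # leave it set to an empty string.
    if os.environ.pop(varname, None) is not None:
        print("unset %s" % varname)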
Example #12
    def setBeeeOnSegmentTestfw(self, test):
        newTestsDict = test.getSupportedTests()

        # if the merged dict is smaller than the sum of both, keys collided
        expected = len(self.allTestsDict) + len(newTestsDict)
        self.allTestsDict.update(newTestsDict)

        if len(self.allTestsDict) < expected:
            print utils.warning("extension conflict!")
            sys.stdout.flush()
            return False
Example #13
File: project.py Project: NathanW2/Roam
 def from_folder(cls, rootfolder):
     settings = os.path.join(rootfolder, "settings.config")
     project = cls(rootfolder, {})
     try:
         with open(settings, 'r') as f:
             settings = yaml.load(f)
             project.settings = settings
     except IOError as e:
         project.valid = False
         project.error = "No settings.config found in {} project folder".format(rootfolder)
         utils.warning(e)
     return project
Example #14
    def bindByName(self, controlname, value):
        """
        Binds a value to a control based on the control name.

        controlname - Name of the control to bind
        value - QVariant holding the value.
        """
        control = self.getControl(controlname)

        try:
            self.bindValueToControl(control, value)
        except BindingError as er:
            warning(er.reason)
Example #15
def run_command(cmd, internal=False, retval=False, progress=False):
    global _request
    if internal is False and not os.path.exists(config.CONFIG_KEY):
        run_command('get_key', True)
    data = None
    cfg = config.load_user_config()
    url = utils.parse_url(cfg.url)
    if _request:
        req = _request
    else:
        if url['scheme'].lower() == 'https':
            req = https.HTTPSConnection(url['host'], int(url['port'] or 443))
        else:
            req = httplib.HTTPConnection(url['host'], int(url['port'] or 80))
        _request = req
    original_cmd = cmd
    cmd = urllib.quote(json.dumps(cmd))
    query = '{0}run?{1}={2}'.format(url['path'] or '/', 'c' if internal is True else 'q', cmd)
    headers = sign_request(cfg.apikey, 'GET', query)
    headers.update({
        'User-Agent': 'dotcloud/cli (version: {0})'.format(VERSION),
        'X-DotCloud-Version': VERSION
        })
    trace_id = None
    try:
        req.request('GET', query, headers=headers)
        resp = req.getresponse()
        info = resp.getheader('X-Dotcloud-Info')
        trace_id = resp.getheader('X-Dotcloud-TraceID')
        data = resp.read()
        req.close()
        if _export is True:
            print data
            return
        if info:
            utils.warning(info.replace(';', '\n'))
        if _trace and trace_id:
            utils.info('TraceID for "{0}": {1}'.format(
                original_cmd, trace_id))
        data = json.loads(data)
        if data['type'] == 'cmd':
            return run_remote(data['data'])
        if 'data' in data and len(data['data']) > 0:
            if progress:
                sys.stderr.write('\r')
            print data['data']
        elif progress:
            sys.stderr.write('.')
    except socket.error, e:
        utils.die('Cannot reach DotCloud service ("{0}").\n' \
                'Please check the connectivity and try again.'.format(str(e)))
Example #16
    def system( self, command, errorMessage="", debuglevel=1, **kw):
        """Convenience function for running system commands.
        This method prints a debug message and then runs a system command.
        If the system command returns with errors the method prints an error
        message and exits if @ref self.subinfo.options.exitOnErrors is true"""

        utils.debug( str(command), debuglevel )
        if utils.system( command, **kw):
            return True
        utils.warning( "while running %s cmd: %s" % (errorMessage, str(command)) )
        return False
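The original branched on `exitOnErrors` but issued the same warning either way, so the branches were collapsed above. The docstring, however, promises an exit when `exitOnErrors` is set; one plausible reading that honors it, using the `utils.die` helper seen in Examples #4 and #29 (an assumption about this codebase's intent, not verified):

def system(self, command, errorMessage="", debuglevel=1, **kw):
    utils.debug(str(command), debuglevel)
    if utils.system(command, **kw):
        return True
    if self.subinfo.options.exitOnErrors:
        # die() aborts, matching the docstring's "exits" wording.
        utils.die("while running %s cmd: %s" % (errorMessage, str(command)))
    else:
        utils.warning("while running %s cmd: %s" % (errorMessage, str(command)))
    return False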
Example #17
 def getAllPackages( self, category ):
     """returns all packages of a category except those that are listed in a file 'dont_build.txt' in the category directory
     in case the category doesn't exist, nothing is returned"""
     if self.isCategory( category ):
         plist = copy.copy(self.categories[ category ])
         if os.path.exists( os.path.join( rootDirForCategory( category ), category, "dont_build.txt" ) ):
             with open( os.path.join( rootDirForCategory( category ), category, "dont_build.txt" ), "r" ) as f:
                 for line in f:
                     try:
                         plist.remove( line.strip() )
                     except ValueError:
                         utils.warning( "couldn't remove package %s from category %s's package list" % ( line.strip(), category ) )
         return plist
     else:
         return
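A usage sketch for this method; the category name and the `portage` instance are hypothetical, and the output depends on the local portage tree:

packages = portage.getAllPackages("win32libs-bin")
if packages is not None:
    for name in packages:
        print(name)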
Example #18
File: run.py Project: Kelimion/rejit
def plot_results():
  utils.command_assert(['scons', '-C', utils.dir_rejit, 'flot_js'])

  print("Plotting results...")

  html_file_results = open(join(utils.dir_benchmarks, 'benchmarks_results.html'), 'w')

  html_file_header = open(join(utils.dir_benchmarks_resources_html, 'benchmarks_results.html.header'), 'r')
  html_file_results.write(html_file_header.read())
  html_file_header.close()

  html_file_results.write('<h2>Info</h2>\n')
  html_file_results.write('Date: %s<br/>\n' % datetime.datetime.now().strftime("%Y/%m/%d %H:%M"))
  html_file_results.write('Command: <code>%s</code><br/>\n' % ' '.join(sys.argv))
  iter_args = copy.deepcopy(vars(args))
  iter_args.pop('machine_description')
  html_file_results.write('Arguments: <code>%s</code><br/>\n' % iter_args)
  if args.machine_description:
    if not os.path.isfile(args.machine_description):
      utils.warning("Could not open '%s'" % args.machine_description)
    else:
      desc_file = open(args.machine_description, 'r')
      html_file_results.write('<h3>Machine description</h3>\n\n')
      html_file_results.write(desc_file.read())
      desc_file.close()

  html_file_results.write('<h3>Engines versions</h3>\n\n')
  html_file_results.write('<table style="text-align:right;">\n')
  html_file_results.write('<tr><td>engine</td><td style="padding-left:50px;">commit</td></tr>')
  for engine in engines:
    html_file_results.write('<tr>\n')
    html_file_results.write('  <td>%s</td><td style="padding-left:50px;"><pre style="padding:0 0 0 0;margin:0 0 0 0;">%s</pre></td>\n' % (engine.name, engine.commit_id()))
    html_file_results.write('</tr>\n')

  html_file_results.write('</table>\n')

  html_file_results.write('<h2>Results</h2>\n\n')
  html_file_results.write('<table>\n')
  for res in results:
    html_file_results.write(res.plot())
  html_file_results.write('</table>\n')

  html_file_footer = open(join(utils.dir_benchmarks_resources_html, 'benchmarks_results.html.footer'), 'r')
  html_file_results.write(html_file_footer.read())
  html_file_footer.close()

  html_file_results.close()
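The repeated open/read/close pairs in this example are easy to tidy with `with` blocks; a small hypothetical helper in that spirit:

def append_file(dst, src_path):
    # Copy the contents of src_path onto the already-open dst file object,
    # closing src_path deterministically.
    with open(src_path, 'r') as src:
        dst.write(src.read())

The header, machine-description, and footer reads above would then each become a single append_file(html_file_results, path) call.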
Example #19
	def populateProgramIter(self, data, response, index):
		# TODO: unlock interface

		it = self.getIterFromIndex(index)
		model = self.tree.get_model()

		#print response.status, data

		if data is None or response.status != 200:
			self.reportError(data, response, index, STEP_PROGRAM)
			#self.status.push(0, _("Errore durante lo scaricamento della lista dei file(HTTP %d)") % response.status)
			model.set_value(it, COL_COLO, self.color_error)
			return

		#try:
		# TODO: in practice we should read the revision here and put it in the
		# column; I suggest renaming the columns, dropping the md5 column and
		# keeping just two, one for the new revision and one for the old one.
		# NOTE: or a single column holding a revision like 1.2-r2

		new_schema = ReportReader(data)
		old_schema = ReportReader(None, os.path.join(utils.DHOME_DIR, "pyacqua.xml"))

		a = (new_schema.get("mainversion"), new_schema.get("secondversion"), new_schema.get("revision"))
		model.set_value(it, COL_NREV, self.versionize(a))

		ret = old_schema.checkDiff(new_schema)

		if ret == 0:
			# message: no updates available, etc...
			utils.info(_("Nessun aggiornamento disponibile"))
		if ret == 1:
			# compatible versions, we can update
			self.__checkFileToUpdate(new_schema)
		if ret == 2:
			# TODO: offer a choice...
			# like a clean install
			utils.warning(_("Le versioni sono potenzialmente compatibili\nma _NON_ viene garantito il perfetto aggiornamento"))
		if ret == 3:
			utils.error(_("Versioni incompatibili per procedere con l'aggiornamento"))
		#except:
		#	self.status.push(0, _("Impossibile interpretare il file xml"))
		#	return

		self.update_btn.set_sensitive(True)
Example #20
    def __init__( self ):
        self.subinfo = subinfo()
        # required for package generating because we build from svnHEAD by default
        self.subinfo.options.package.version = '0.5.4'
        self.subinfo.options.configure.defines = '-DBUILD_BASE_LIB_WITH_QT=ON -DBUILD_QT_LIB=ON '
        if not emergePlatform.isCrossCompilingEnabled() or self.isHostBuild():
            self.subinfo.options.configure.defines += ' -DBUILD_TOOLS=ON '
        if compiler.isMinGW_W32():
            self.subinfo.options.configure.defines += ' -DMINGW_W32=ON '
        CMakePackageBase.__init__( self )

        qmake = os.path.join(self.mergeDestinationDir(), "bin", "qmake.exe")
        if not os.path.exists(qmake):
            utils.warning("could not find qmake in <%s>" % qmake)
        ## \todo a standardized way to check if a package is installed in the image dir would be good.
        self.subinfo.options.configure.defines += " -DQT_QMAKE_EXECUTABLE:FILEPATH=%s " \
            % qmake.replace('\\', '/')
Example #21
    def bindFeature(self, qgsfeature, mandatory_fields=True, editing=False):
        """
        Binds a feature's values to the form. If the control has the mandatory
        property set then it will be added to the mandatory group.

        qgsfeature - A QgsFeature to bind the values from
        mandatory_fields - True if mandatory fields should be respected (default)
        """
        self.feature = qgsfeature
        self.connectControlsToSQLCommands()
        defaults = self.getDefaults()
        for index, value in qgsfeature.attributeMap().items():
            field = self.fields[index]

            try:
                control = self.getControl(field.name())
            except ControlNotFound as ex:
                warning(ex.message)
                continue

            if mandatory_fields:
                mandatory = control.property("mandatory").toBool()
                if mandatory:
                    buddy = self.getBuddy(control)
                    self.mandatory_group.addWidget(control, buddy)

            info("Binding %s to %s" % (control.objectName(), value.toString()))

            isdefaultset = False
            if not editing:
                try:
                    # Get the default value from the database and use that instead.
                    value = defaults[control]
                    isdefaultset = control in defaults
                except KeyError:
                    pass

            try:
                self.bindValueToControl(control, value)
            except BindingError as er:
                warning(er.reason)

            self.bindSaveValueButton(control, indefaults=isdefaultset)
            self.createHelpLink(control)

            self.fieldtocontrol[index] = control
Example #22
File: project.py Project: loongfee/Roam
def readfolderconfig(folder, configname):
    """
    Read the config file from the given folder. A file called settings.config is expected to be found in the folder.
    :param folder: The folder to read the config from.
    :return: Returns None if the settings file could not be read.
    """
    settingspath = os.path.join(folder, "{}.config".format(configname))
    if not os.path.exists(settingspath):
        settingspath = os.path.join(folder, "settings.config")

    try:
        with open(settingspath, 'r') as f:
            settings = yaml.load(f) or {}
        return settings
    except IOError as e:
        utils.warning(e)
        utils.warning("Returning empty settings for settings.config")
        return {}
Example #23
    def bindFeature(self, qgsfeature, mandatory_fields=True, editing=False):
        """
        Binds a feature's values to the form. If the control has the mandatory
        property set then it will be added to the mandatory group.

        qgsfeature - A QgsFeature to bind the values from
        mandatory_fields - True if mandatory fields should be respected (default)
        """
        self.feature = qgsfeature
        defaults = self.form.getSavedValues()

        for index, value in qgsfeature.attributeMap().items():
            name = str(self.fields[index].name())

            try:
                control = self.getControl(name)
            except ControlNotFound as ex:
                warning(ex.message)
                continue

            if mandatory_fields:
                mandatory = control.property("mandatory").toBool()
                if mandatory:
                    buddy = self.getBuddy(control)
                    self.mandatory_group.addWidget(control, buddy)

            info("Binding %s to %s" % (control.objectName(), value.toString()))

            self.bindSaveValueButton(control, defaults, editingmode=editing)
            if not editing:
                try:
                    value = defaults[name]
                except KeyError:
                    pass

            try:
                self.bindValueToControl(control, value, index)
            except BindingError as er:
                warning(er.reason)

            self.createHelpLink(control)

            self.fieldtocontrol[index] = control
Example #24
def getPackagesCategories(packageName, defaultCategory=None):
    utils.debug("getPackagesCategories for package name %s" % packageName, 1)
    if defaultCategory is None:
        if "EMERGE_DEFAULTCATEGORY" in os.environ:
            defaultCategory = os.environ["EMERGE_DEFAULTCATEGORY"]
        else:
            defaultCategory = "kde"

    packageList, categoryList = [], []
    if len(packageName.split("/")) == 1:
        if PortageInstance.isCategory(packageName):
            utils.debug("isCategory=True", 2)
            packageList = PortageInstance.getAllPackages(packageName)
            categoryList = [packageName] * len(packageList)
        else:
            utils.debug("isCategory=False", 2)
            if PortageInstance.isCategory(defaultCategory) and PortageInstance.isPackage(defaultCategory, packageName):
                # prefer the default category
                packageList = [packageName]
                categoryList = [defaultCategory]
            else:
                if PortageInstance.getCategory(packageName):
                    packageList = [packageName]
                    categoryList = [PortageInstance.getCategory(packageName)]
                else:
                    utils.warning("unknown category or package: %s" % packageName)
    elif len(packageName.split("/")) == 2:
        [cat, pac] = packageName.split("/")
        if PortageInstance.isCategory(cat):
            categoryList = [cat]
        else:
            utils.warning("unknown category %s; ignoring package %s" % (cat, packageName))
        if len(categoryList) > 0 and PortageInstance.isPackage(categoryList[0], pac):
            packageList = [pac]
        if len(categoryList) and len(packageList):
            utils.debug("added package %s/%s" % (categoryList[0], pac), 2)
        else:
            utils.debug("ignoring package %s" % packageName)
    else:
        utils.error("unknown packageName")

    return packageList, categoryList
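A usage sketch for this dispatcher; the package name is hypothetical and the output depends on the local portage tree:

packageList, categoryList = getPackagesCategories("kde/kdelibs")
for category, package in zip(categoryList, packageList):
    print("%s/%s" % (category, package))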
Example #25
    def bindFeature(self, qgsfeature, mandatory_fields=True, editing=False):
        """
        Binds a feature's values to the form. If the control has the mandatory
        property set then it will be added to the mandatory group.

        qgsfeature - A QgsFeature to bind the values from
        mandatory_fields - True if mandatory fields should be respected (default)
        """
        self.feature = qgsfeature
        defaults = qmaplayer.getSavedValues(self.layer)
        
        for index in xrange(qgsfeature.fields().count()):
            value = qgsfeature[index]
            name = qgsfeature.fields()[index].name()
            
            try:
                control = self.getControl(name)
            except ControlNotFound as ex:
                warning(ex.message)
                continue

            if mandatory_fields:
                mandatory = control.property("mandatory")
                if mandatory:
                    buddy = self.getBuddy(control)
                    self.mandatory_group.addWidget(control, buddy)

            self.bindSaveValueButton(control, defaults, editingmode=editing)
            if not editing:
                try:
                    value = defaults[name]
                except KeyError:
                    pass

            try:
                self.bindValueToControl(control, value, index)
            except BindingError as er:
                warning(er.reason)

            self.createHelpLink(control)
            self.mandatory_group.validateAll()
Example #26
    def __readDependenciesForChildren( self, deps):
        children = []
        if deps:
            for line in deps:
                ( category, package ) = line.split( "/" )
                utils.debug( "category: %s, name: %s" % ( category, package ), 1 )
                try:
                    version = PortageInstance.getNewestVersion( category, package )
                except PortageException as e:
                    utils.warning("%s for %s/%s as a dependency of %s/%s" %(e, e.category, e.package, self.category , self.name))
                    continue

                if line not in self._dependencyList:
                    p = DependencyPackage( category, package, False, self )
                    utils.debug( "adding package p %s/%s-%s" % ( category, package, version ), 1 )
                    self._dependencyList[ line ] = p
                    p.__readChildren()
                else:
                    p = self._dependencyList[ line ]
                children.append( p )
        return children
Example #27
File: run.py Project: Kelimion/rejit
  def run(self, benchmark, sizes):
    if not os.path.exists(self.exec_path):
      utils.error("Could not find: %s" % self.exec_path)

    run_command = [
        self.exec_path,
        benchmark.regexp(self.syntax),
        '--iterations=' + str(args.iterations),
        '--low_char=' + benchmark.low_char,
        '--high_char=' + benchmark.high_char,
        ] + ('--size' + self.args_list_assign_char + self.args_list_separator.join(map(str, sizes))).split(' ')
    # The regexp is enclosed with quotes.
    printed_run_command = [
        self.exec_path,
        '"' + benchmark.regexp(self.syntax) + '"',
        '--iterations=' + str(args.iterations),
        '--low_char=' + benchmark.low_char,
        '--high_char=' + benchmark.high_char,
        '--size' + self.args_list_assign_char + self.args_list_separator.join(map(str, sizes))
        ]

    if verbose or args.display:
      print("Benchmarking %s for regexp \"%s\"" % (self.name, benchmark.regexp(self.syntax)))
    if verbose:
      verbose("Command: %s" % (' '.join(printed_run_command)))


    p = subprocess.Popen(run_command, stdout=subprocess.PIPE)
    # communicate() waits for the process and may only be called once
    output = p.communicate()[0]
    if p.returncode != 0:
      print("Failed to run:\n%s" % (' '.join(printed_run_command)))
      print("Output:\n%s" % output)
      utils.warning("Failed to run benchmark.")
      return None

    if args.display:
      print output
    return output
Example #28
 def __init__( self, **args ):
     self.subinfo = subinfo()
     PackageBase.__init__(self)
     if not self.subinfo.options.useShortPathes \
             and self.compiler() == "mingw4" and len(self.rootdir) > 10:
         # mingw4 cannot compile qt if the command line arguments
         # exceed 8192 chars
         utils.warning('for mingw4, rootdir %s is too long for full path names.'
             ' Using short path names.' % self.rootdir,  1)
         self.subinfo.options.useShortPathes = True
     GitSource.__init__(self)
     QMakeBuildSystem.__init__(self)
     KDEWinPackager.__init__(self)
     # get instance of dbus and openssl package
     self.openssl = portage.getPackageInstance('win32libs-bin', 'openssl')
     if self.buildType() == "Debug":
         self.dbus = portage.getPackageInstance('win32libs-sources', 'dbus-src')
     else:
         self.dbus = portage.getPackageInstance('win32libs-bin', 'dbus')
     if not emergePlatform.isCrossCompilingEnabled():
         self.mysql_server = portage.getPackageInstance('testing', 'mysql-pkg')
     else:
         self.wcecompat = portage.getPackageInstance('win32libs-sources', 'wcecompat-src')
Example #29
def addInstalled(category, package, version, buildtype=""):
    """ deprecated, use InstallDB.installdb.addInstalled() instead """
    utils.debug("addInstalled called", 2)
    # write a line to etc/portage/installed,
    # that contains category/package-version
    path = os.path.join(utils.etcDir())
    if not os.path.isdir(path):
        os.makedirs(path)
    if buildtype != "":
        fileName = "installed-" + buildtype
    else:
        fileName = "installed"
    utils.debug("installing package %s - %s into %s" % (package, version, fileName), 2)
    if os.path.isfile(os.path.join(path, fileName)):
        with open(os.path.join(path, fileName), "rb") as f:
            for line in f:
                if line.startswith("%s/%s-%s" % (category, package, version)):
                    utils.warning("version already installed")
                    return
                elif line.startswith("%s/%s-" % (category, package)):
                    utils.die("already installed, this should no happen")
    with open(os.path.join(path, fileName), "ab") as f:
        f.write("%s/%s-%s\r\n" % (category, package, version))
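Per the docstring, this function is deprecated and new code is pointed at `InstallDB.installdb.addInstalled()` instead. A sketch of the replacement call, assuming it takes the same arguments (not verified against that module):

from InstallDB import installdb

# Equivalent call through the non-deprecated interface (assumed signature).
installdb.addInstalled(category, package, version, buildtype)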
Example #30
    def qmerge( self ):
        """merging the image directory into the filesystem"""
        if utils.verbose() > 1:
            print "base qmerge called"
        for pkgtype in ['bin', 'lib', 'doc', 'src']:
            script = os.path.join( self.packagedir, "post-install-%s.cmd" % pkgtype )
            scriptName = "post-install-%s-%s-%s.cmd" % ( self.package, self.version, pkgtype )
            destscript = os.path.join( self.imagedir, "manifest", scriptName )
            if not os.path.exists( os.path.join( self.imagedir, "manifest" ) ):
                os.mkdir( os.path.join( self.imagedir, "manifest" ) )
            if os.path.exists( script ):
                shutil.copyfile( script, destscript )

        utils.mergeImageDirToRootDir( self.imagedir, self.rootdir )
        # run post-install scripts
        for pkgtype in ['bin', 'lib', 'doc', 'src']:
            scriptName = "post-install-%s-%s-%s.cmd" % ( self.package, self.version, pkgtype )
            script = os.path.join( self.rootdir, "manifest", scriptName )
            if os.path.exists( script ):
                cmd = "cd %s && %s" % ( self.rootdir, script )
                if not utils.system(cmd):
                    utils.warning("%s failed!" % cmd )
        portage.addInstalled( self.category, self.package, self.version )
        return True
Example #31
File: miner.py Project: youht88/bc
def nodeList():
    utils.warning("node.nodes", node.nodes)
    response = {
        'nodes': list(node.nodes)
    }
    return jsonify(response), 200
Example #32
def get_host_link_args():
    bundled = [ ]
    system = [ ]

    llvm_dynamic = True
    llvm_val = get()
    llvm_config = get_llvm_config()
    clang_static_libs = ['-lclangFrontend',
                         '-lclangSerialization',
                         '-lclangDriver',
                         '-lclangCodeGen',
                         '-lclangParse',
                         '-lclangSema',
                         '-lclangAnalysis',
                         '-lclangEdit',
                         '-lclangASTMatchers',
                         '-lclangAST',
                         '-lclangLex',
                         '-lclangBasic']
    llvm_components = ['bitreader',
                       'bitwriter',
                       'ipo',
                       'instrumentation',
                       'option',
                       'objcarcopts',
                       'profiledata',
                       'all-targets',
                       'coverage',
                       'coroutines',
                       'lto']


    if llvm_val == 'system':
        # On Mac OS X with Homebrew, apply a workaround for issue #19217.
        # This avoids linking with the libc++ installed by llvm@12 e.g.
        if use_system_libcxx_workaround():
            sdkroot = get_system_llvm_built_sdkroot()
            if sdkroot:
                # Note: -isysroot only affects includes
                # and -Wl,-syslibroot seems to have no effect
                system.append("-L" + os.path.join(sdkroot, "usr", "lib"))

        # Decide whether to try to link statically or dynamically.
        # Future work: consider using 'llvm-config --shared-mode'
        # to make this choice.
        host_platform = chpl_platform.get('host')
        if host_platform == 'darwin':
            llvm_dynamic = False

        shared_mode = run_command([llvm_config, '--shared-mode'])

        if shared_mode.strip() == 'static':
            llvm_dynamic = False

        # Make sure to put clang first on the link line
        # because it depends on LLVM libraries
        if llvm_dynamic:
            system.append('-lclang-cpp')
        else:
            system.extend(clang_static_libs)

        libdir = run_command([llvm_config, '--libdir'])
        if libdir:
            libdir = libdir.strip()
            system.append('-L' + libdir)
            system.append('-Wl,-rpath,' + libdir)

        ldflags = run_command([llvm_config,
                               '--ldflags', '--system-libs', '--libs'] +
                              llvm_components)
        if ldflags:
            system.extend(filter_llvm_link_flags(ldflags.split()))


    elif llvm_val == 'bundled':
        # Link statically for now for the bundled configuration
        # If this changes in the future:
        # * check for problems finding libstdc++ with different PrgEnv compilers
        # * make sure that 'make install' works correctly in terms of any
        #   rpaths embedded in the executable
        llvm_dynamic = False

        # don't try to run llvm-config if it's not built yet
        if is_included_llvm_built():

            libdir = run_command([llvm_config, '--libdir'])
            if libdir:
                libdir = libdir.strip()
                bundled.append('-L' + libdir)
                bundled.append('-Wl,-rpath,' + libdir)

            if llvm_dynamic:
                bundled.append('-lclang-cpp')
            else:
                bundled.extend(clang_static_libs)

            ldflags = run_command([llvm_config,
                                   '--ldflags', '--libs'] +
                                  llvm_components)

            bundled.extend(ldflags.split())

            system_libs = run_command([llvm_config,
                                      '--system-libs'] +
                                      llvm_components)

            system.extend(system_libs.split())

        else:
            warning("included llvm not built yet")

    return (bundled, system)
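A small usage sketch for this helper, just printing the two flag groups it computes:

bundled, system = get_host_link_args()
print("bundled link args: %s" % " ".join(bundled))
print("system link args: %s" % " ".join(system))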
Example #33
def main(input_path,
         only_report=False,
         force_dir=False,
         no_config_check=False,
         restart=False,
         is_testing_run=False,
         manual_config_tag=None):
    # settable parameters
    ############################################################

    email = "*****@*****.**"
    passw = None

    ############################################################

    # set the experiment parameters
    error("Non-existent input path: {} ".format(input_path),
          not exists(input_path))
    if isdir(input_path):
        # assume a single .yml file in the directory
        ymls = [
            x for x in listdir(input_path) if any(
                x.endswith(suff) for suff in [".yaml", ".yml"])
        ]
        error(
            "Input path {} is a directory with no yaml configuration files.".
            format(input_path), not ymls)
        error(
            "Input path {} is a directory with more than one yaml configuration file."
            .format(input_path),
            len(ymls) > 1)
        config_file = join(input_path, ymls[0])
    else:
        config_file = input_path

    # if input file is existing csv scores, just print them
    if config_file.endswith(".csv"):
        print_existing_csv_results(config_file)
        return

    conf = read_ordered_yaml(config_file)

    try:
        exps = conf[EXPERIMENTS_KEY_NAME]
    except KeyError:
        error(
            f"Need an [{EXPERIMENTS_KEY_NAME}] key for large-scale experiments."
        )

    # folder to run experiments in
    run_dir = exps["run_folder"]
    if force_dir:
        warning(
            "Overriding experiment folder from yml value: {} to current dir: {}, due to force-dir"
            .format(run_dir, dirname(run_dir)))
        run_dir = dirname(input_path)
    if not isabs(run_dir):
        run_dir = join(os.getcwd(), run_dir)

    # dir checks
    # ----------
    # virtualenv folder
    venv_dir = conf[EXPERIMENTS_KEY_NAME]["venv"] if "venv" in conf[
        EXPERIMENTS_KEY_NAME] else None
    # results csv file
    # results_file = conf["experiments"]["results_file"]
    results_file = join(run_dir, "run_results.csv")

    if venv_dir and not exists(venv_dir):
        error("Virtualenv dir {} not found".format(venv_dir))
    if not exists(run_dir):
        info("Run dir {} not found, creating.".format(run_dir))
        makedirs(run_dir)
    else:
        error(
            "Specified a non-dir path as the running directory: {}".format(
                run_dir), not isdir(run_dir))
        if restart:
            warning(
                "Specified restart, and experiment dir {} exists. Deleting!".format(run_dir))
            rmtree(run_dir)
            makedirs(run_dir)

    # logging
    os.makedirs(run_dir, exist_ok=True)
    setup_simple_logging(conf["print"]["log_level"], logging_dir=run_dir)

    info("Generating configurations from source file {}".format(config_file))

    # evaluation measures
    try:
        eval_measures = as_list(exps["measures"]) if "measures" in exps else [
            "f1-score", "accuracy"
        ]
        print(eval_measures)
        aggr_measures = as_list(exps["label_aggregation"]) if "label_aggregation" in exps \
            else ["macro", "micro"]
        stat_functions = as_list(
            exps["fold_aggregation"]) if "fold_aggregation" in exps else [
                "mean"
            ]
        run_types = as_list(
            exps["run_types"]) if "run_types" in exps else ["run"]
        do_sstests = "sstests" in exps
        if not do_sstests:
            warning("No statistical tests specified.")
        else:
            sstests = ["tukeyhsd"
                       ] if "names" not in exps["sstests"] else as_list(
                           exps["sstests"]["names"])
            sstests_measures = [
                "f1-score"
            ] if "measures" not in exps["sstests"] else as_list(
                exps["sstests"]["measures"])
            sstests_aggregations = [
                "macro"
            ] if "aggregations" not in exps["sstests"] else as_list(
                exps["sstests"]["aggregations"])
            sstests_limit_vars = None if "limit_variables" not in exps[
                "sstests"] else as_list(exps["sstests"]["limit_variables"])
    except Exception as ex:
        error(
            "Failed to read evaluation / testing options due to: [{}]".format(
                ex))

    # folder where run scripts are
    sources_dir = exps["sources_dir"] if "sources_dir" in exps else os.getcwd()
    if "sources_dir" not in exps:
        warning("Defaulting sources folder to the current directory: {}".format(
            sources_dir))
    error(
        "Main module: {} not found. Is the sources dir ok?".format(
            join(sources_dir, "main.py")),
        not exists(join(sources_dir, "main.py")))

    configs = make_configs(conf, run_dir, sources_dir)
    # check run id uniqueness
    if len(set([c.id for c in configs])) != len(configs):
        error("Duplicate run ids from the input: {}".format(
            [c.id for c in configs]))
    if len(set([c['folders']['run'] for c in configs])) != len(configs):
        error("Duplicate run folders from the input: {}".format(
            [c["folders"]["run"] for c in configs]))
    # if we're running a testing suite, filter out incompatible configs
    if is_testing_run:
        configs = filter_testing(configs, config_file)

    # mail
    do_send_mail = exps["send_mail"] if "send_mail" in exps else None
    if do_send_mail:
        passw = getpass.getpass()

    # copy the experiments configuration file in the target directory
    experiments_conf_path = join(run_dir, basename(config_file))
    if exists(experiments_conf_path):
        # make sure it's the same effing config, unless check is overridden
        if not no_config_check:
            config_to_copy = OrderedDict(
                {k: v
                 for (k, v) in conf.items() if k != EXPERIMENTS_KEY_NAME})
            existing_exp_conf = read_ordered_yaml(experiments_conf_path)
            existing_exp_conf = OrderedDict({
                k: v
                for (k, v) in existing_exp_conf.items()
                if k != EXPERIMENTS_KEY_NAME
            })
            equal, diff = compare_dicts(config_to_copy, existing_exp_conf)
            if not equal:
                error(
                    "The workflow contents derived from the original config [{}] differ from the ones in the experiment directory: [{}]!\nDifference is: {}"
                    .format(config_file, experiments_conf_path, diff))
    else:
        if not only_report:
            info("Copying experiments configuration at {}".format(
                experiments_conf_path))
            with open(experiments_conf_path, "w") as f:
                write_ordered_dump(OrderedDict(conf), f)
        else:
            info(
                "Only-report run: will not copy experiment configuration at {}"
                .format(experiments_conf_path))

    results, result_paths = {}, {}

    #################################################################################
    skipped_configs = []

    # prelim experiments
    for conf_index, conf in enumerate(configs):
        run_id = conf.id
        # prepend a configuration id tag, if supplied
        if manual_config_tag is not None:
            run_id += manual_config_tag
            experiment_dir = conf["folders"]["run"] + manual_config_tag
        else:
            experiment_dir = conf["folders"]["run"]
        info("Running experimens for configuration {}/{}: {}".format(
            conf_index + 1, len(configs), run_id))
        completed_file = join(experiment_dir, "completed")
        error_file = join(experiment_dir, "error")
        # results to run folders, if not specified otherwise
        respath = join(experiment_dir, "results")
        if not isabs(respath):
            conf["folders"]["results"] = join(experiment_dir, respath)

        if exists(completed_file):
            info("Skipping completed experiment {}".format(run_id))
        elif only_report:
            info("Only-report execution: skipping non-completed experiment {}".
                 format(run_id))
            skipped_configs.append(run_id)
            continue
        else:
            # run it
            if exists(error_file):
                os.remove(error_file)
            makedirs(experiment_dir, exist_ok=True)

            conf_path = join(experiment_dir, "config.yml")
            if exists(conf_path) and not no_config_check:
                warning("Configuration file at {} already exists!".format(
                    conf_path))
                existing = read_ordered_yaml(conf_path)
                equal, diff = compare_dicts(existing, conf)
                if not equal:
                    error(
                        "Different local config encountered: {} \nDifference: {}"
                        .format(conf_path, diff))
                #if not (OrderedDict(conf) == existing):
                #    error("Different local config encountered at {}".format(conf_path))
            else:
                with open(conf_path, "w") as f:
                    write_ordered_dump(OrderedDict(conf), f)
            info("Configuration file: {}".format(conf_path))
            # write the run script file
            script_path = join(experiment_dir, "run.sh")
            with open(script_path, "w") as f:
                if venv_dir:
                    f.write("source \"{}/bin/activate\"\n".format(venv_dir))
                f.write("cd \"{}\"\n".format(sources_dir))
                f.write(
                    "python3 \"{}\" \"{}\" && touch \"{}\" && exit 0\n".format(
                        join(sources_dir, "main.py"), conf_path,
                        completed_file))
                f.write("touch '{}' && exit 1\n".format(error_file))

            subprocess.run(["/usr/bin/env", "bash", script_path])
            if exists(error_file):
                print("An error has occurred in the run, exiting.")
                info("An error has occurred in the run, exiting.")
                if do_send_mail:
                    sendmail(email, passw, "an error occurred")
                exit(1)
        # read experiment results
        exp_res_file = join(experiment_dir, "results", "results.pkl")
        with open(exp_res_file, "rb") as f:
            res_data = pickle.load(f)
        results[run_id] = res_data
        result_paths[run_id] = exp_res_file

    # messages = []
    total_results = {}

    # show results
    for stat in stat_functions:
        info("Results regarding {} statistic:".format(stat))
        print_vals = {}
        for run_id in results:
            print_vals[run_id] = {}
            for m in eval_measures:
                for run in run_types:
                    for ag in aggr_measures:
                        try:
                            results[run_id][run][m][ag]
                        except KeyError:
                            continue
                        header = "{}.{}.{}.{}".format(run[:3], m[:3], ag[:3],
                                                      stat)

                        val = None
                        if stat in "var mean std".split():
                            val = results[run_id][run][m][ag][stat]
                        if val is None:
                            continue
                        val = round(val, 4)
                        print_vals[run_id][header] = val
        # print'em
        info("SCORES:")
        print_dataframe_results(print_vals)

        total_results[stat] = print_vals
    info("Writing these results to file {}".format(results_file))
    total_df = pd.DataFrame.from_dict(total_results, orient='index')
    if total_df.size == 0:
        info("No results parsed.")
    else:
        total_df.to_csv(results_file)

    if skipped_configs:
        for s, sk in enumerate(skipped_configs):
            info("Skipped incomplete config: {}/{} : {}".format(
                s + 1, len(skipped_configs), sk))

    if do_sstests:
        do_stat_sig_testing(sstests, sstests_measures, sstests_aggregations,
                            configs, results, sstests_limit_vars)

    # [info(msg) for msg in messages]
    if do_send_mail:
        sendmail(email, passw, "run complete.")
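The completed/error sentinel files drive the skip-and-resume logic in the loop above; the pattern in isolation looks like this (a sketch, with the hypothetical `work` standing in for the per-experiment run):

import os
from os.path import exists, join

def run_once(experiment_dir, work):
    # Skip experiments that already finished; clear and retry errored ones.
    completed = join(experiment_dir, "completed")
    error_file = join(experiment_dir, "error")
    if exists(completed):
        return
    if exists(error_file):
        os.remove(error_file)
    try:
        work()
        open(completed, "w").close()
    except Exception:
        open(error_file, "w").close()
        raise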
Example #34
def do_stat_sig_testing(methods,
                        measures,
                        label_aggregations,
                        configs,
                        results,
                        limit_variables=None,
                        run_mode="run"):
    testable_variables = list(configs[0].ddict.keys())
    if limit_variables:
        testable_variables = [
            x for x in testable_variables if x in limit_variables
        ]
    info("Running statistical tests on{} variables: {}".format(
        " all" if limit_variables is None else " specified",
        testable_variables))
    for method, measure, label_aggregation in product(methods, measures,
                                                      label_aggregations):
        info("Running statistical testing via {} on {} {}".format(
            method, label_aggregation, measure))
        # get ids and variable values per configuration
        df_inputs = []
        try:
            for run_id in results:
                # find corresp. configuration
                conf = [c for c in configs if c.id == run_id]
                error(
                    "Num configurations found with id: {} is: {} during stat-testing!"
                    .format(run_id, len(conf)),
                    len(conf) != 1)
                conf = conf[0]
                # get variables
                df_row = {k: v for (k, v) in conf.ddict.items() if k != "id"}
                for score in results[run_id][run_mode][measure][
                        label_aggregation]['folds']:
                    df_row["score"] = score
                    df_inputs.append(deepcopy(df_row))
        except:
            warning("Encountered invalid results accessors: {}".format(
                (run_mode, measure, label_aggregation)))
            continue
        data = pd.DataFrame(df_inputs)
        inst = instantiator.Instantiator()
        stat_test = inst.create(method)

        for v, variable in enumerate(testable_variables):
            info("Experiment variable {}/{}: {}".format(
                v + 1, len(testable_variables), variable))
            if limit_variables is not None:
                if variable not in limit_variables:
                    continue
            if len(data[variable]) == len(set(data[variable])):
                warning(
                    "Skipping testing for parameter [{}] due to having only 1 observation per value"
                    .format(variable))
                continue
            if len(set(data[variable])) == 1:
                warning(
                    "Skipping testing for parameter [{}] due to having only 1 unique parameter value: {}"
                    .format(variable, data[variable].values[0]))
                continue
            stat_result = stat_test.run(data["score"], data[variable])
            stat_test.report()
Example #35
def sndrcv(pks,
           pkt,
           timeout=None,
           inter=0,
           verbose=None,
           chainCC=0,
           retry=0,
           multi=0):
    if not isinstance(pkt, Gen):
        pkt = SetGen(pkt)

    if verbose is None:
        verbose = conf.verb
    debug.recv = plist.PacketList([], "Unanswered")
    debug.sent = plist.PacketList([], "Sent")
    debug.match = plist.SndRcvList([])
    nbrecv = 0
    ans = []
    # do it here to fix random fields, so that parent and child have the same
    all_stimuli = tobesent = [p for p in pkt]
    notans = len(tobesent)

    hsent = {}
    for i in tobesent:
        h = i.hashret()
        if h in hsent:
            hsent[h].append(i)
        else:
            hsent[h] = [i]
    if retry < 0:
        retry = -retry
        autostop = retry
    else:
        autostop = 0

    while retry >= 0:
        found = 0

        if timeout < 0:
            timeout = None

        rdpipe, wrpipe = os.pipe()
        rdpipe = os.fdopen(rdpipe)
        wrpipe = os.fdopen(wrpipe, "w")

        pid = 1
        try:
            pid = os.fork()
            if pid == 0:
                try:
                    sys.stdin.close()
                    rdpipe.close()
                    try:
                        i = 0
                        if verbose:
                            print "Begin emission:"
                        for p in tobesent:
                            pks.send(p)
                            i += 1
                            time.sleep(inter)
                        if verbose:
                            print "Finished to send %i packets." % i
                    except SystemExit:
                        pass
                    except KeyboardInterrupt:
                        pass
                    except:
                        log_runtime.exception("--- Error in child %i" %
                                              os.getpid())
                        log_runtime.info("--- Error in child %i" % os.getpid())
                finally:
                    try:
                        os.setpgrp()  # Change process group to avoid Ctrl-C
                        sent_times = [
                            p.sent_time for p in all_stimuli if p.sent_time
                        ]
                        cPickle.dump((conf.netcache, sent_times), wrpipe)
                        wrpipe.close()
                    except:
                        pass
            elif pid < 0:
                log_runtime.error("fork error")
            else:
                wrpipe.close()
                stoptime = 0
                remaintime = None
                inmask = [rdpipe, pks]
                try:
                    try:
                        while 1:
                            if stoptime:
                                remaintime = stoptime - time.time()
                                if remaintime <= 0:
                                    break
                            r = None
                            if arch.FREEBSD or arch.DARWIN:
                                inp, out, err = select(inmask, [], [], 0.05)
                                if len(inp) == 0 or pks in inp:
                                    r = pks.nonblock_recv()
                            else:
                                inp, out, err = select(inmask, [], [],
                                                       remaintime)
                                if len(inp) == 0:
                                    break
                                if pks in inp:
                                    r = pks.recv(MTU)
                            if rdpipe in inp:
                                if timeout:
                                    stoptime = time.time() + timeout
                                del (inmask[inmask.index(rdpipe)])
                            if r is None:
                                continue
                            ok = 0
                            h = r.hashret()
                            if h in hsent:
                                hlst = hsent[h]
                                for i in range(len(hlst)):
                                    if r.answers(hlst[i]):
                                        ans.append((hlst[i], r))
                                        if verbose > 1:
                                            os.write(1, "*")
                                        ok = 1
                                        if not multi:
                                            del (hlst[i])
                                            notans -= 1
                                        else:
                                            if not hasattr(
                                                    hlst[i], '_answered'):
                                                notans -= 1
                                            hlst[i]._answered = 1
                                        break
                            if notans == 0 and not multi:
                                break
                            if not ok:
                                if verbose > 1:
                                    os.write(1, ".")
                                nbrecv += 1
                                if conf.debug_match:
                                    debug.recv.append(r)
                    except KeyboardInterrupt:
                        if chainCC:
                            raise
                finally:
                    try:
                        nc, sent_times = cPickle.load(rdpipe)
                    except EOFError:
                        warning(
                            "Child died unexpectedly. Packets may have not been sent %i"
                            % os.getpid())
                    else:
                        conf.netcache.update(nc)
                        for p, t in zip(all_stimuli, sent_times):
                            p.sent_time = t
                    os.waitpid(pid, 0)
        finally:
            if pid == 0:
                os._exit(0)

        remain = reduce(list.__add__, hsent.values(), [])
        if multi:
            remain = filter(lambda p: not hasattr(p, '_answered'), remain)

        if autostop and len(remain) > 0 and len(remain) != len(tobesent):
            retry = autostop

        tobesent = remain
        if len(tobesent) == 0:
            break
        retry -= 1

    if conf.debug_match:
        debug.sent = plist.PacketList(remain[:], "Sent")
        debug.match = plist.SndRcvList(ans[:])

    # clean the ans list to delete the _answered field
    if multi:
        for s, r in ans:
            if hasattr(s, '_answered'):
                del (s._answered)

    if verbose:
        print "\nReceived %i packets, got %i answers, remaining %i packets" % (
            nbrecv + len(ans), len(ans), notans)
    return plist.SndRcvList(ans), plist.PacketList(remain, "Unanswered")
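
For context, a minimal usage sketch of the send-and-receive loop above, as driven through scapy's public sr() helper (the destination address is illustrative):

# Hedged sketch: assumes scapy's documented IP()/ICMP()/sr() API.
from scapy.all import IP, ICMP, sr

# Send one ICMP echo request; sr() returns (answered, unanswered) lists.
ans, unans = sr(IP(dst="192.0.2.1") / ICMP(), timeout=2, verbose=0)
for sent, received in ans:
    print(sent.summary() + " -> " + received.summary())
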
示例#36
0
def build_from_path(config):
    warning("Sampling rate: {}".format(hparams.sample_rate))

    executor = ProcessPoolExecutor(max_workers=config.num_workers)
    futures = []
    index = 1

    base_dir = os.path.dirname(config.metadata_path)
    data_dir = os.path.join(base_dir, config.data_dirname)
    makedirs(data_dir)

    loss_coeff = defaultdict(one)
    if config.metadata_path.endswith("json"):
        with open(config.metadata_path, 'r', encoding='utf-8') as f:
            content = f.read()
        info = json.loads(content)
    elif config.metadata_path.endswith("csv"):
        with open(config.metadata_path, 'r', encoding='utf-8') as f:
            info = {}
            for line in f:
                path, text = line.strip().split('|')
                info[path] = text
    else:
        raise Exception(" [!] Unkown metadata format: {}".format(
            config.metadata_path))

    new_info = {}
    for path in info.keys():
        if not os.path.exists(path):
            new_path = os.path.join(base_dir, path)
            if not os.path.exists(new_path):
                print(" [!] Audio not found: {}".format([path, new_path]))
                continue
        else:
            new_path = path

        new_info[new_path] = info[path]

    info = new_info

    for path in info.keys():
        if type(info[path]) == list:
            if hparams.ignore_recognition_level == 1 and len(info[path]) == 1 or \
                    hparams.ignore_recognition_level == 2:
                loss_coeff[path] = hparams.recognition_loss_coeff

            info[path] = info[path][0]

    ignore_description = {
        0: "use all",
        1: "ignore only unmatched_alignment",
        2: "fully ignore recognitio",
    }

    print(" [!] Skip recognition level: {} ({})". \
            format(hparams.ignore_recognition_level,
                   ignore_description[hparams.ignore_recognition_level]))

    for audio_path, text in info.items():
        if hparams.ignore_recognition_level > 0 and loss_coeff[audio_path] != 1:
            continue

        if base_dir not in audio_path:
            audio_path = os.path.join(base_dir, audio_path)

        try:
            tokens = text_to_sequence(text)
        except:
            continue

        fn = partial(_process_utterance, audio_path, data_dir, tokens,
                     loss_coeff[audio_path])
        futures.append(executor.submit(fn))

    n_frames = [future.result() for future in tqdm(futures)]
    n_frames = [n_frame for n_frame in n_frames if n_frame is not None]

    hours = frames_to_hours(n_frames)

    print(' [*] Loaded metadata for {} examples ({:.2f} hours)'.format(
        len(n_frames), hours))
    print(' [*] Max length: {}'.format(max(n_frames)))
    print(' [*] Min length: {}'.format(min(n_frames)))

    plot_n_frames(n_frames, os.path.join(base_dir,
                                         "n_frames_before_filter.png"))

    min_n_frame = hparams.reduction_factor * hparams.min_iters
    max_n_frame = hparams.reduction_factor * hparams.max_iters - hparams.reduction_factor

    n_frames = [n for n in n_frames if min_n_frame <= n <= max_n_frame]
    hours = frames_to_hours(n_frames)

    print(' [*] After filtering: {} examples ({:.2f} hours)'.format(
        len(n_frames), hours))
    print(' [*] Max length: {}'.format(max(n_frames)))
    print(' [*] Min length: {}'.format(min(n_frames)))

    plot_n_frames(n_frames, os.path.join(base_dir,
                                         "n_frames_after_filter.png"))
示例#37
0
def validate_system_accounts(snapshot):

    step('Verifying system accounts')

    errs = []
    for name in system_accounts:

        # Verify exists
        if name not in snapshot['accounts']:
            errs.append("> missing system account %s" % (name))
            continue

        account = snapshot['accounts'][name]

        # Verify code
        expected = None
        if system_accounts[name]['code']:
            expected = system_accounts[name]['code']

        current = account['code_version'] if account[
            'code_version'] != "0000000000000000000000000000000000000000000000000000000000000000" else None
        real = sha256(account['code'].decode('hex')).digest().encode(
            'hex') if expected else None

        if not (expected == current == real):
            errs.append(
                "> wrong code on %s account\n\texpected : %s\n\tcurrent  : %s\n\tcalculated : %s"
                %
                (name, expected if expected else "<none>",
                 current if current else "<none>", real if real else "<none>"))

        # Verify ABI / Constitution
        abi = system_accounts[name]['abi']
        if abi:
            with open('{0}/abi-files/{1}'.format(DIR_PATH, abi)) as f:
                expected_abi = json.load(f)
                current_abi = account['abi']

                # HACK: special character \u2019
                if name == "eosio":
                    current_abi["ricardian_clauses"][0]["body"] = current_abi[
                        "ricardian_clauses"][0]["body"].replace(
                            "u2019", "\\u2019")

                ddiff = DictDiffer(expected_abi, current_abi)
                added = ddiff.added()
                removed = ddiff.removed()
                changed = ddiff.changed()

                # SPECIAL CASE SKIP
                skip = ["____comment", "error_messages"]
                for s in skip:
                    if s in added: added.remove(s)
                    if s in removed: removed.remove(s)
                    if s in changed: changed.remove(s)

                if len(added) != 0 or len(removed) != 0 or len(changed) != 0:

                    tmp = tempfile.mkstemp()[1]

                    is_constitution = name == "eosio" and "ricardian_clauses" in added or "ricardian_clauses" in removed or "ricardian_clauses" in changed
                    if is_constitution:
                        errs.append(
                            "> Constitution mismatch - please check %s" %
                            (tmp))
                    else:
                        errs.append("> ABI missmatch in %s - please check %s" %
                                    (name, tmp))

                    with open(tmp, 'w') as out:
                        if len(added) != 0:
                            out.write(
                                "> Chain ABI has '%s' and expected ABI doesn't\n"
                                % (','.join(added)))
                        if len(removed) != 0:
                            out.write(
                                "> Expected ABI has %s and chain ABI doesn't\n" %
                                (','.join(removed)))
                        if len(changed) != 0:
                            out.write("> They both differ '%s'\n" %
                                      (','.join(changed)))

                        out.write('\n')
                        out.write("# Chain ABI\n")
                        out.write(
                            json.dumps(current_abi, indent=2, sort_keys=True))
                        out.write('\n')
                        out.write("# Expected ABI\n")
                        out.write(
                            json.dumps(expected_abi, indent=2, sort_keys=True))
                        out.write('\n')

        # Verify privileged
        if account['privileged'] != system_accounts[name]['privileged']:
            errs.append("> %s account wrong privileged setting" % (name))

        # Verify resignement
        if name != "eosio.null" and name != "eosio.prods":
            actor = system_accounts[name]['actor']
            permission = system_accounts[name]['permission']
            if authority_controlled_by_one_actor(account['permissions']["owner"], actor, permission) != True or \
               authority_controlled_by_one_actor(account['permissions']["owner"], actor, permission) != True:
                errs.append("> %s account NOT PROPERLY RESIGNED" % (name))

    if len(errs):
        warning()
        for er in errs:
            print er
    else:
        success()

    return True
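
DictDiffer is referenced above but not included in the snippet; a minimal sketch of such a helper might look like the following (the argument order and change semantics are assumptions for illustration):

# Hypothetical sketch of a DictDiffer-style helper.
class DictDiffer(object):
    """Compare two dicts and report added, removed and changed keys."""

    def __init__(self, current, past):
        self.current, self.past = current, past
        self.current_keys, self.past_keys = set(current), set(past)
        self.intersect = self.current_keys & self.past_keys

    def added(self):
        return self.current_keys - self.intersect

    def removed(self):
        return self.past_keys - self.intersect

    def changed(self):
        return set(k for k in self.intersect if self.past[k] != self.current[k])
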
示例#38
0
文件: test.py 项目: youht88/bc
item = {
    "outPrvkey": key1[0],
    "outPubkey": key1[1],
    "inPubkey": key2[1],
    "amount": 999
}

new_transaction = Transaction(item)

transaction_dict = new_transaction.to_dict()
for peer in PEERS:
    try:
        res = requests.post("%stransacted" % peer, json=transaction_dict)
    except Exception as e:
        print("%s error is %s" % (peer, e))
utils.warning("transaction广播完成")

utils.success("测试mine广播")

possible_transactions = sync.sync_possible_transactions()
print('-' * 20, '\n', possible_transactions)
possible_transactions_dict = []
for item in possible_transactions:
    possible_transactions_dict.append(item.to_dict())
    os.remove(BROADCASTED_TRANSACTION_DIR + item.hash + ".json")
new_block = mine.mine_for_block(possible_transactions_dict)

block_dict = new_block.to_dict()
for peer in PEERS:
    try:
        res = requests.post("%smined" % peer, json=block_dict)
示例#39
0
    def get_player_identity(self, username):
        response = self.__web_interface.get_players()
        player_tree = html.fromstring(response.content)

        theads_path = "//table[@id=\"players\"]/thead//th[position()>1]" \
                      "//text()"
        theads_result = player_tree.xpath(theads_path)

        name_col = theads_result.index("Player name")
        ip_col = theads_result.index("IP")
        sid_col = theads_result.index("Steam ID")
        nid_col = theads_result.index("Unique Net ID")
        player_key_col = 5

        trows_path = "//table[@id=\"players\"]/tbody//td"
        trows_result = player_tree.xpath(trows_path)
        trows_result = [
            trow.text if trow.text else "" for trow in trows_result
        ]
        trows_result = [
            list(group)
            for k, group in groupby(trows_result, lambda x: x == "\xa0")
            if not k
        ]

        player_keys_path = "//table[@id=\"players\"]/tbody" \
                           "//input[@name=\"playerkey\"]//@value"
        player_keys_result = player_tree.xpath(player_keys_path)
        for i, player_key in enumerate(player_keys_result):
            trows_result[i][player_key_col] = player_key

        # Duplicate usernames cannot be identified reliably
        players_found = 0

        for player_row in trows_result:
            if player_row[name_col] == username:
                players_found += 1
                ip = player_row[ip_col]
                sid = player_row[sid_col]
                nid = player_row[nid_col]
                player_key = player_row[player_key_col]
                country, country_code = get_country(ip)

        if players_found != 1:
            warning(_("Couldn't find identify player: {}").format(username))
            return {
                'ip': None,
                'country': "Unknown",
                'country_code': "??",
                'steam_id': None,
                'network_id': None,
                'player_key': None
            }

        return {
            'ip': ip,
            'country': country,
            'country_code': country_code,
            'steam_id': sid,
            'network_id': nid,
            'player_key': player_key
        }
示例#40
0
def singleWorkpackageXML(wp, wpwiki, parser, wpcount):
    # print "analyzing wp: " + wp
    # print "wiki code: \n" + wpwiki

    ### main administration information:
    wpMain = parser.getListAsDict(
        parser.getSection(wpwiki, "Administrative information", 2))
    wpMain['Number'] = wpcount

    #### get the deliverables
    wpDelXML = ""
    for deliv in parser.getTable(parser.getSection(wpwiki, "Deliverables", 3)):
        wpDelXML += '<deliverable id="' + deliv["Label"] +'">\n'  + \
                    dictAsXML(deliv, parser, ["Contributors", "Producing task(s)"]) + \
                    "</deliverable>\n"

    ## get the milestones
    wpMilestonesXML = ""
    for ms in parser.getTable(parser.getSection(wpwiki, "Milestones", 3)):
        wpMilestonesXML += '<milestone id="' + ms["Label"] + '">\n'  + \
                    dictAsXML(ms, parser, ["Contributors", "Producing task(s)"]) + \
                    "</milestone>\n"

    ## get the tasks
    wpTasksXML = ""
    newTasks = set([])
    for task in parser.getTable(parser.getSection(wpwiki, "Tasks", 3)):
        if task['Label'] not in newTasks:
            # print task['Label']
            newTasks.add(task['Label'])
            task['Main'] = True
            # plug out corresponding description
            # print wpwiki
            td = parser.getSection(
                wpwiki, "Task [dD]escription: " + task['Label'].strip(), 3)
            obj = parser.getSection(td, "Objectives", 4)
            if options.verbose:
                print 'task objectives', obj
            descr = parser.getSection(td, "Description of work", 4)
            # print "Objectives: ", obj
            # print "Descr: ", descr
            task['taskobjectives'] = obj
            task['taskdescription'] = descr
        else:
            task['Main'] = False
        # pp(task)

        try:
            wpTasksXML += '<task id="' + task["Label"] + '">\n'  + \
                        dictAsXML(task, parser) + \
                        "</task>\n"
        except:
            pp(task)
            utils.warning(
                "Could not properly convert task in workpackage generation, wp "
                +
                # wpwiki + ", "  +
                task["Label"])
            wpTasksXML += '<task id="' + task["Label"] + '">\n'  + \
                          "Conversion error occured here!" + \
                          "</task>\n"

    ## get the effort - that's a little bit more difficult:
    wpEffortXML = ""
    for effort in parser.getTable(parser.getSection(wpwiki, "Effort", 3)):
        wpEffortXML += '<partner id="' + effort["Partner"] + '">\n'
        for k, v in effort.iteritems():
            if not k == "Partner":
                wpEffortXML += "<taskeffort><task>" + k + \
                               "</task><resources>" + v + \
                               "</resources></taskeffort>\n"
                # a bit of error checking:
                if not k in wpTasksXML:
                    print "Warning: assigning effort to task " + k + " which is not defined in this wp " + wp
        wpEffortXML += '</partner>\n'

    ## and the final workpackage string
    wpXML =  '<workpackage id="' + wp + '">' + \
            dictAsXML (wpMain) + \
            "<objectives>\n" + parser.getLaTeX(parser.getSection(wpwiki, "Objectives", 2).strip()) + "</objectives>\n" + \
            "<wpdescription>\n" + parser.getLaTeX(parser.getSection(wpwiki, "WP Description", 2).strip()) + "</wpdescription>\n" + \
            wpDelXML + \
            wpMilestonesXML + \
            wpTasksXML + \
            wpEffortXML + \
            "</workpackage>"

    utils.writefile(
        wpXML, os.path.join(config.get('PathNames', 'xmlwppath'), wp + '.xml'))

    return wpMain
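
dictAsXML is used above but not defined in this snippet; a minimal, hypothetical sketch of the idea (escaping and the parser hook are omitted, and treating the third argument as an exclusion list is an assumption):

# Hypothetical sketch: render a flat dict as <key>value</key> elements.
def dictAsXML(d, parser=None, exclude=None):
    exclude = exclude or []
    xml = ""
    for key, value in d.items():
        if key in exclude:
            continue
        tag = key.lower().replace(" ", "")
        xml += "<%s>%s</%s>\n" % (tag, value, tag)
    return xml
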
示例#41
0
    def map_text_nonpartial_load(self):
        # initialize unknown token embedding, if it's not defined
        if self.unknown_word_token not in self.embeddings and self.map_missing_unks:
            warning(
                "[{}] unknown token missing from embeddings, adding it as zero vector."
                .format(self.unknown_word_token))
            self.embeddings.loc[self.unknown_word_token] = np.zeros(
                self.dimension)

        unknown_token_index = self.embeddings.index.get_loc(
            self.unknown_word_token)
        self.unknown_element_index = unknown_token_index
        # loop over input text bundles
        for dset_idx, docs in enumerate(self.text):
            num_docs = len(docs)
            desc = "Embedding mapping for text bundle {}/{}, with {} texts".format(
                dset_idx + 1, len(self.text), num_docs)
            with tqdm.tqdm(total=len(self.text[dset_idx]),
                           ascii=True,
                           desc=desc) as pbar:
                word_stats = WordEmbeddingStats(self.vocabulary,
                                                self.embeddings.index)
                for j, doc_wp_list in enumerate(self.text[dset_idx]):
                    # drop POS
                    word_list = [wp[0] for wp in doc_wp_list]
                    # debug("Text {}/{} with {} words".format(j + 1, num_documents, len(word_list)))
                    # check present & missing words
                    doc_indices = []
                    present_map = {}
                    present_index_map = {}
                    for w, word in enumerate(word_list):
                        word_in_embedding_vocab = word in self.embeddings.index
                        word_stats.update_word_stats(word,
                                                     word_in_embedding_vocab)
                        present_map[word] = word_in_embedding_vocab
                        if not word_in_embedding_vocab:
                            if not self.map_missing_unks:
                                continue
                            else:
                                word_index = unknown_token_index

                        else:
                            word_index = self.embeddings.index.get_loc(word)
                        doc_indices.append(word_index)
                    # handle missing
                    word_list = [w for w in word_list if present_map[w]] if not self.map_missing_unks else \
                        [w if present_map[w] else self.unknown_word_token for w in word_list]

                    self.present_words.update(
                        [w for w in present_map if present_map[w]])
                    error("No words present in document.", len(word_list) == 0)

                    # just save indices
                    doc_indices = np.asarray(doc_indices, np.int32)
                    self.vector_indices[dset_idx].append(doc_indices)
                    self.elements_per_instance[dset_idx].append(
                        len(doc_indices))
                    pbar.update()

            word_stats.print_word_stats()
            self.elements_per_instance[dset_idx] = np.asarray(
                self.elements_per_instance[dset_idx], np.int32)

        self.vector_indices, new_embedding_index = realign_embedding_index(
            self.vector_indices,
            np.asarray(list(range(len(self.embeddings.index)))))
        self.embeddings = self.embeddings.iloc[new_embedding_index].values
        # write
        info("Writing embedding mapping to {}".format(
            self.serialization_path_preprocessed))
        write_pickled(self.serialization_path_preprocessed,
                      self.get_all_preprocessed())
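
realign_embedding_index is not shown here; one plausible sketch of the idea follows — keep only the embedding rows that are actually referenced and remap the stored indices accordingly (the name, signature and return shape are assumptions):

import numpy as np

# Hypothetical sketch of an index-realignment helper.
def realign_embedding_index(vector_indices, full_index):
    # full_index is kept for signature parity with the call above.
    used = np.unique(np.concatenate(
        [idx for dset in vector_indices for idx in dset]))
    remap = {old: new for new, old in enumerate(used)}
    realigned = [[np.asarray([remap[i] for i in idx], np.int32)
                  for idx in dset] for dset in vector_indices]
    return realigned, used
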
示例#42
0
def add_buffer(ph_bufpdbName="",
               ph_bufitpName="",
               ph_bufqqA=[1],
               ph_bufqqB=[0],
               ph_bufMargin=2.5,
               attempts=100000):
    # This function writes a dummy .pdb containing the default buffer (ion).
    def writeDefaultPDB():
        with open("defaultBuffer.pdb", 'w') as file:
            file.write("TITLE     BUFFER PARTICLE\n")
            file.write("MODEL        1\n")
            file.write(
                "ATOM      1  NA  BUF     1     110.896   2.872  68.855  1.00  0.00            \n"
            )
            file.write("TER\n")
            file.write("ENDMDL\n")

    # This function writes the topology for the default buffer (ion).
    # Note: charge should not be 0 because then some interactions are not generated???
    # Update: this was fixed by Berk in e2c2340.
    def writeDefaultITP():
        with open("defaultBuffer.itp", 'w') as file:
            file.write("[ moleculetype ]\n")
            file.write("; molname	nrexcl\n")
            file.write("BUF		1\n\n")
            file.write("[ atoms ]\n")
            file.write(
                "; id	at type		res nr	residu name at name  cg nr	charge	 \n")
            file.write("1		SOD			1		   BUF			NA		   1		0	 \n\n")
            file.write("#ifdef POSRES_BUF\n")
            file.write("; Position restraint for each buffer ion\n")
            file.write("[ position_restraints ]\n")
            file.write(";  i funct       fcx        fcy        fcz\n")
            file.write("   1    1       1000       1000       1000\n")
            file.write("#endif\n")

    # Skip this whole step if we don't need it.
    if not (universe.get('ph_constantpH')
            and universe.get('ph_QQleveling') in [1, 2]):
        utils.update(
            "add_buffer",
            "either ph_constantpH is False or ph_QQleveling = 0 --> skipping..."
        )
        return

    # Make sure that the sum of the charges in state A and B are correct.
    if (sum(ph_bufqqA) != 1 or sum(ph_bufqqB) != 0):
        utils.warning(
            "add_buffer",
            "buffer charges incorrectly specified! sums must be 1 and 0")
        universe.get('ph_bufqqA')
        universe.get('ph_bufqqB')

    # Determine whether we use the default or a custom buffer.
    useDefault = False
    if (ph_bufpdbName == "" and ph_bufitpName == ""):
        utils.update("add_buffer", "using default (built-in) buffer...")
        useDefault = True
    elif (ph_bufpdbName == "" and ph_bufitpName != ""):
        utils.warning(
            "add_buffer",
            "ph_bufitpName not specified, resorting to default buffer!")
        useDefault = True
    elif (ph_bufpdbName != "" and ph_bufitpName == ""):
        utils.warning(
            "add_buffer",
            "ph_bufpdbName not specified, resorting to default buffer!")
        useDefault = True
    else:
        utils.update("add_buffer", "using custom buffer...")

    if (useDefault):
        # Check to make sure that the charges for the default buffer are correct.
        if (ph_bufqqA != [1] or ph_bufqqB != [0]):
            utils.warning(
                "add_buffer",
                "buffer charges incorrectly specified for default buffer!")
            universe.get('ph_bufqqA')
            universe.get('ph_bufqqB')

        # Generate the files for the default buffer and update data members.
        writeDefaultPDB()
        ph_bufpdbName = "defaultBuffer.pdb"
        writeDefaultITP()
        ph_bufitpName = "defaultBuffer.itp"

    # Get the number of buffer molecules we need.
    ph_bufnmol = 0
    for lambdaType in universe.get('ph_lambdaTypes'):
        ph_bufnmol += countRes(lambdaType.d_resname)

    utils.update(
        "add_buffer",
        "will attempt to add {0} buffer molecule(s)...".format(ph_bufnmol))

    # RUN GROMACS INSERT-MOLECULES COMMAND
    os.system(
        "touch vdwradii.dat")  # we need this dummy file for this to work.

    os.system(
        "gmx insert-molecules -f {0} -o {1}_BUF.pdb -ci {2} -nmol {3} -scale 1.0 -radius {4} -try {5} >> builder.log 2>&1"
        .format(
            universe.get('d_nameList')[-1], universe.get('d_pdbName'),
            ph_bufpdbName, ph_bufnmol, 0.5 * ph_bufMargin,
            int(attempts / ph_bufnmol)))

    os.remove("vdwradii.dat")  # clean dummy file.

    # To update d_residues.
    load("{0}_BUF.pdb".format(universe.get('d_pdbName')))

    # Give user a warning if there wasn't enough space.
    actual = countRes('BUF')
    if actual < ph_bufnmol:
        utils.warning(
            "add_buffer",
            "only {0}/{1} requested buffer molecules inserted after {2} attempts,"
            .format(actual, ph_bufnmol, attempts))
        utils.warning(
            "add_buffer",
            "try decreasing ph_bufMargin (={0}nm) or increasing d_boxMargin (={1}nm)..."
            .format(ph_bufMargin, universe.get('d_boxMargin')))
    else:
        utils.update(
            "add_buffer",
            "succesfully added {0} buffer molecule(s)...".format(actual))

    # To add buffer topology to topol.top.
    utils.update("add_buffer", "updating topology...")

    if (useDefault):
        os.remove("defaultBuffer.pdb")  # Remove dummy .pdb file.
    else:
        os.system("cp {} .".format(ph_bufitpName))  # Copy to working dir.

    topol.add_mol(os.path.basename(ph_bufitpName), "Include buffer topology",
                  'BUF', actual)

    # Set some parameters in the universe.
    universe.add('ph_bufpdbName', ph_bufpdbName)
    universe.add('ph_bufitpName', ph_bufitpName)
    universe.add('ph_bufqqA', ph_bufqqA)
    universe.add('ph_bufqqB', ph_bufqqB)
    universe.add('ph_bufMargin', ph_bufMargin)
    universe.add('ph_bufnmol', actual)

    # To update d_nameList.
    utils.add_to_nameList("{0}_BUF.pdb".format(universe.get('d_pdbName')))
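
A hedged usage sketch for the function above (the file names are illustrative; with no arguments the built-in default buffer is written and used):

add_buffer()  # default (built-in) ion buffer
add_buffer(ph_bufpdbName="myBuffer.pdb", ph_bufitpName="myBuffer.itp",
           ph_bufqqA=[1], ph_bufqqB=[0], ph_bufMargin=2.0)
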
示例#43
0
    def buildFigure(self, t):
        """An attempt to allow direct figure inclusion. See
        documentation for details on syntax and limitations."""
        lines = t.split('\n')
        latex = ""
        # print self.figureRE
        for l in lines:
            # print l
            m = re.search(self.figureRE, l)
            if m:
                ## print "line with figure: " , l
                ## print 'pre', m.group('pre')
                ## print 'fs', m.group('fs')
                ## print 'post', m.group('post')

                # s is the constructed replacement string. Might contain warnings
                s = ""

                # print "recognized figure"
                kvstring = m.group('fs')
                # print kvstring, self.figureKeys
                r = re.compile(self.figureKeys)
                d = self.extractFigureKeys(kvstring)

                # pp(d)
                # error checking: is the figure there, in a good format?
                if not d.has_key("file"):
                    utils.warning(
                        "You are trying to include a graphic, but did not specify the filename!"
                    )
                    continue

                # strip off any extension of the filename first - TODO
                mm = re.search(r'([^.]*?)\..*', d['file'])
                if mm:
                    # utils.warning ("No need to specify file extension for graphic inclusion, file: " + d['file'])
                    d['file'] = mm.group(1)

                self.getFileFromWiki(d['file'])

                # check for PDF first
                crucialFailure = False

                if ((not os.path.exists(
                        os.path.join(
                            self.config.get("PathNames",
                                            'manuallatexfigurespath'),
                            d["file"] + ".pdf"))) and (not os.path.exists(
                                os.path.join(
                                    self.config.get("PathNames",
                                                    'uploadedfigurespath'),
                                    d["file"] + ".pdf")))):
                    ## w = "You are trying to include file " + d["file"] +  \
                    ##     ", but no PDF file of that name exists in " + \
                    ##     self.config.get("PathNames", 'manuallatexfigurespath') + \
                    ##     ' or in ' +  self.config.get("PathNames", 'uploadedfigurespath')
                    ## utils.warning (w)
                    ## s += w

                    # print (os.path.join(
                    #     self.config.get("PathNames", 'manuallatexfigurespath'),
                    #     d["file"]))
                    if ((not glob.glob(
                            os.path.join(
                                self.config.get("PathNames",
                                                'manuallatexfigurespath'),
                                d["file"] + ".*")))
                            and (not glob.glob(
                                os.path.join(
                                    self.config.get("PathNames",
                                                    'uploadedfigurespath'),
                                    d["file"] + ".*")))):
                        w = ("You are trying to include file " +
                             d["file"] +
                             ", but no file with any extension of that name was found in " +
                             self.config.get("PathNames",
                                             'manuallatexfigurespath')  + \
                             ' or in ' +  self.config.get("PathNames", 'uploadedfigurespath'))
                        utils.warning(w)
                    # that overwrites a potential warning about pdf file not found
                        s = w
                        crucialFailure = True
                    else:
                        w = (
                            "You are trying to include file " + d["file"] +
                            ", and at least some file with that basename was found in "
                            + self.config.get("PathNames",
                                              'manuallatexfigurespath') +
                            " but you REALLY want to put PDF files there for acceptable results"
                        )

                        utils.warning(w)
                        s = w

                if not d.has_key("label"):
                    w =  ("You are not assigning a label to figure " + d["file"] + \
                             " - that will make cross-referencing impossible")
                    utils.warning(w)
                    s += " -- " + w

                if not d.has_key("caption"):
                    w =  ("You are not giving a caption to figure " + d["file"] + \
                             " - that will look VERY strange in the text!")
                    utils.warning(w)
                    s += " -- " + w

                st = ""
                if not crucialFailure:
                    # no warnings produced, so let's include the figure
                    st = "\\begin{figure}{\\centering\\includegraphics*[width="
                    if d.has_key("latexwidth"):
                        st += d["latexwidth"]
                    else:
                        st += "0.8"
                    st += "\\textwidth]"
                    st += "{" + d["file"] + "}"
                    if d.has_key("caption"):
                        st += "\\caption{" + d["caption"] + "}"
                    if d.has_key("label"):
                        # st += "\\label{fig:" + d["label"] + "}"
                        st += "\\label{" + d["label"] + "}"
                    st += "}\\end{figure}"

                if s:
                    s = st + "\\fxwarning{" + s + "}"
                else:
                    s = st

                #  don't ignore the rest of the line:
                ## print "--------------"
                ## print l
                ## print m.group('pre') + s + m.group('post')
                # latex += l
                latex += m.group('pre') + s + m.group('post')
            else:
                # print "no match"
                latex += l + "\n"

        return latex
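
extractFigureKeys is referenced above but not shown; a hypothetical sketch, assuming a "key=value|key=value" syntax inside the figure tag:

# Hypothetical sketch: parse "file=foo|caption=bar|label=fig:x" into a dict.
def extractFigureKeys(self, kvstring):
    d = {}
    for pair in kvstring.split("|"):
        if "=" in pair:
            key, value = pair.split("=", 1)
            d[key.strip()] = value.strip()
    return d
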
示例#44
0
def mss_ignored():
    ignored = sorted(f[:-4] for f in listdir('data') if f.startswith('mss') \
                     and f.endswith('_B.mod') and int(f[3:5]) > 35)
    warning('Ignoring the MSS family,', len(ignored), 'files')
    return ignored
示例#45
0
def print_some(print_count, to_print):
    if print_count == 0: warning()
    if print_count <= 5:
        print "** {0}".format(to_print)
        print_count += 1
    return print_count
示例#46
0
    def report_overall_results(self, validation_description, write_folder):
        """Function to report learning results
        """
        if self.is_supervised():
            self.merge_and_show_test_label_distributions()
            # compute overall predicted labels across all folds, if any
            overall_predictions = np.concatenate(
                [np.argmax(x, axis=1) for x in self.predictions['run']])
            # count distribution occurences (prediction total average)
            self.show_label_distribution(
                overall_predictions,
                message="Predicted label (average) distribution")
            info("==============================")
            self.analyze_overall_errors()
        info("==============================")

        info("Printing overall results:")
        info("Run types: {}".format("/".join(self.preferred_types)))
        info("Measures: {}".format("/".join(self.preferred_measures)))
        info("Label aggregations: {}".format("/".join(
            self.label_aggregations)))
        info("Fold aggregations: {}".format("/".join(self.fold_aggregations)))

        info("------------------------------")
        print_count = 0
        axes_dict = self.get_evaluation_axes_to_print()
        for run_type in axes_dict:
            for measure in axes_dict[run_type]:
                if self.do_multilabel:
                    self.performance[run_type][
                        measure] = self.calc_fold_score_stats(
                            self.performance[run_type][measure])
                    print_count += self.print_performance(run_type, measure)
                else:
                    for aggregation in axes_dict[run_type][measure]:
                        self.performance[run_type][measure][
                            aggregation] = self.calc_fold_score_stats(
                                self.performance[run_type][measure]
                                [aggregation])
                        print_count += self.print_performance(
                            run_type, measure, aggregation)
            info("------------------------------")
        info("==============================")
        if not print_count:
            warning(
                "No suitable run / measure / aggregation specified in the configuration."
            )

        if write_folder is not None:
            # write the results in csv in the results directory
            # entries in a run_type - measure configuration list are the foldwise scores, followed by the mean
            # write results in json and pkl
            write_dict = {k: v for (k, v) in self.performance.items()}
            with open(join(write_folder, "results.json"), "w") as f:
                json.dump(write_dict, f, indent=2)
            with open(join(write_folder, "results.pkl"), "wb") as f:
                pickle.dump(write_dict, f)
            # write misc
            with open(join(write_folder, "error_analysis.pkl"), "wb") as f:
                pickle.dump(self.error_analysis, f)
            with open(join(write_folder, "confusion_matrices.pkl"), "wb") as f:
                pickle.dump(self.confusion_matrices, f)
            with open(join(write_folder, "label_distributions.json"),
                      "w") as f:
                json.dump(self.all_label_distributions, f)
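
calc_fold_score_stats is not defined in this snippet; based on the comment above about "foldwise scores, followed by the mean", a plausible sketch is:

import numpy as np

# Hypothetical sketch: append the aggregate to the fold-wise scores.
def calc_fold_score_stats(fold_scores):
    scores = np.asarray(fold_scores, dtype=float)
    return list(scores) + [scores.mean()]
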
示例#47
0
def validate_extra_accounts(snapshot, balances, args):

    step('Verifying extra accounts')

    extras = []
    total_balance = 0
    for account in snapshot['accounts']:
        if account not in balances and account not in system_accounts:
            extras.append(account)
            l, c, n = get_account_stake(snapshot, account, args.core_symbol)
            total_balance += l + c + n

    not_abp = []
    not_resigned = []
    abps = [
        abp['producer_name'] for abp in snapshot['block_header_state']
        ['active_schedule']['producers']
    ]

    for extra in extras:
        if extra not in abps:
            not_abp.append(extra)

        if authority_is_empty(snapshot['accounts'][extra]["permissions"]["active"]) or \
           authority_is_empty(snapshot['accounts'][extra]["permissions"]["owner"]):
            not_resigned.append(extra)

    ok = True

    if len(extras) > 21:
        if ok:
            warning()
            ok = False
        print "> More than 21 extra accounts found"

    if len(not_abp) > 0:
        if ok:
            warning()
            ok = False
        print "> At least one extra account is not an ABP"

    if total_balance > 0:
        if ok:
            warning()
            ok = False
        print "> At least one extra account has liquid/staked balance"

    if not ok:
        tmp = tempfile.mkstemp()[1]
        with open(tmp, 'w') as f:
            f.write("Extra accounts:")
            f.write('extras:' + str(extras) + '\n')
            f.write('not_abp:' + str(not_abp) + '\n')
            f.write('total_balance:' + str(total_balance) + '\n')
            f.write('not_resigned:' + str(not_resigned) + '\n')

        print "> Please check %s" % tmp
    else:
        success()

    return True
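
authority_is_empty is referenced above; a hypothetical sketch based on the usual EOS snapshot permission layout (the field names are assumptions):

# Hypothetical sketch: an authority is "empty" when it carries no keys
# and no account-based permissions.
def authority_is_empty(permission):
    auth = permission["required_auth"]
    return len(auth.get("keys", [])) == 0 and len(auth.get("accounts", [])) == 0
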
示例#48
0
def api():
    data = request.values.to_dict()
    try:
        command = re.split("\s+", data["text"])
        slack_id = data["team_id"]
        team_domain = data["team_domain"]
        channel = data["channel_id"]
    except KeyError:
        abort(400)

    # ensuring that the request comes from slack
    if not valid_slack_request(request):
        return abort(404)

    team = db.session.query(Team).filter_by(slack_id=slack_id).first()
    if not team:
        return error(
            "You are not registered in our proxy server, try removig the app "
            "and adding it to slack again.")

    if command[0] == "help":
        fields = [
            {
                "title": "`/pass` _or_ `/pass list`",
                "value": "To list the available passwords in this channel.",
                "short": True,
            },
            {
                "title":
                "`/pass <secret>` or `/pass show <secret>`",
                "value":
                ("To retrieve a one time use link with the secret content, "
                 "this link expires in 15 minutes."),
                "short":
                True,
            },
            {
                "title":
                "`/pass insert <secret>`",
                "value": ("To retrieve the link with the editor to create the "
                          "secret, this link expires in 15 minutes."),
                "short":
                True,
            },
            {
                "title": "`/pass remove <secret>`",
                "value": ("To remove the secret from the group."),
                "short": True,
            },
            {
                "title":
                "`/pass configure` or `/pass configure <server_url>`",
                "value":
                ("To setup the password storage, it is only necessary "
                 "to execute it once."),
                "short":
                True,
            },
        ]
        return jsonify({
            "attachments": [{
                "fallback":
                ("_Usage:_ https://github.com/talpor/password-scale"),
                "text":
                "*_Usage:_*",
                "fields":
                fields,
            }]
        })

    if command[0] == "configure" and len(command) == 2:
        url = command[1]
        if not validators.url(url):
            return error("Invalid URL format, use: https://<domain>")

        if team.url:
            msg = ("This team is already configured, you want to replace "
                   "the password server?")
            return jsonify({
                "attachments": [{
                    "fallback":
                    "This team already configured",
                    "text":
                    msg,
                    "callback_id":
                    "configure_password_server",
                    "color":
                    "warning",
                    "actions": [
                        {
                            "name": "reconfigure_server",
                            "text": "Yes",
                            "type": "button",
                            "value": url,
                        },
                        {
                            "name": "no_reconfigure",
                            "text": "No",
                            "style": "danger",
                            "type": "button",
                            "value": "no",
                        },
                    ],
                }]
            })

        if not team.register_server(url):
            return error("Unable to retrieve the _public_key_ "
                         "from the server".format(team_domain))

        return success("{} team successfully configured!".format(team_domain))

    if command[0] == "configure" and len(command) == 1 or not team.url:
        color = "warning"
        if team.url:
            msg = (
                "*{}* team already have a server configured, if you want to "
                "swap select some of the options below".format(team.name))
        elif command[0] == "configure":
            color = "good"
            msg = "What type of server do you want to use?"
        else:
            msg = (
                "*{}* team does not have a password server configured, select "
                "one of the options below to start.".format(team_domain))

        warning_msg = (
            "This is a test server, any information stored on this server "
            "can be deleted at any moment without prior notice!")
        return jsonify({
            "attachments": [{
                "fallback":
                msg,
                "text":
                msg,
                "color":
                color,
                "callback_id":
                "configure_password_server",
                "actions": [
                    {
                        "name": "use_demo_server",
                        "text": "Use Test Server",
                        "type": "button",
                        "value": "no",
                        "confirm": {
                            "title": "Confirm",
                            "text": warning_msg,
                            "ok_text": "I understand",
                            "dismiss_text": "No",
                        },
                    },
                    {
                        "text": "Request Private Server",
                        "type": "button",
                        "url": CONFIGURATION_GUIDE_URL,
                    },
                    {
                        "name": "no_configure",
                        "text": "Later",
                        "type": "button",
                        "value": "no",
                    },
                ],
            }]
        })
    if command[0] in ["", "list"]:
        try:
            dir_ls = cmd.list(team, channel)
        except SlashpassError as e:
            return error("_{}_".format(e.message))

        if not dir_ls:
            return warning(
                "You have not passwords created for this channel, use "
                "`/pass insert <secret>` to create the first one!")

        return jsonify({
            "attachments": [{
                "fallback":
                dir_ls,
                "text":
                "Password Store\n{}".format(dir_ls),
                "footer": ("Use the command `/pass <key_name>` to retrieve "
                           "some of the keys"),
            }]
        })

    if command[0] == "insert" and len(command) == 2:
        app = command[1]
        token = cmd.generate_insert_token(team, channel, app)

        msg = "Adding password for *{}* in this channel".format(app)
        return jsonify({
            "attachments": [{
                "fallback":
                msg,
                "text":
                msg,
                "footer":
                "This editor will be valid for 15 minutes",
                "color":
                "good",
                "actions": [{
                    "text": "Open editor",
                    "style": "primary",
                    "type": "button",
                    "url": "{}/insert/{}".format(SITE, token),
                }],
            }]
        })

    if command[0] == "remove" and len(command) == 2:
        app = command[1]
        if cmd.remove(team, channel, app):
            return success(
                "The secret *{}* was removed successfully.".format(app))
        return warning("Looks like the secret *{}* is not in your repository "
                       ":thinking_face: use the command `/pass list` "
                       "to verify your storage.".format(app))

    if command[0] == "show" and len(command) == 2:
        app = command[1]
    else:
        app = command[0]

    onetime_link = cmd.show(team, channel, app)
    if onetime_link:
        return jsonify({
            "attachments": [{
                "fallback":
                "Password: {}".format(onetime_link),
                "text":
                "Password for *{}*".format(app),
                "footer":
                "This secret will be valid for 15 minutes",
                "color":
                "good",
                "actions": [{
                    "text": "Open secret",
                    "style": "primary",
                    "type": "button",
                    "url": onetime_link,
                }],
            }]
        })
    else:
        return warning("*{}* is not in the password store.".format(app))
示例#49
0
def create_fc_objects(f, path):
    """ Creates supported objects on scene. Iterates over
        <mainlist> items and tries to recreate the commands in
        the current opened scene. """
    movement = (0.0, 0.0, 0.0)
    rotation = (0.0, 0.0, 0.0, 0.0)
    mk = ("void", "0")
    drawmode = "full"
    elementnum = 0
    to_add_dsph = dict()

    root = ElementTree.fromstring(f)
    mainlist = root.findall("./casedef/geometry/commands/mainlist/*")
    for command in mainlist:
        if command.tag == "matrixreset":
            movement = (0.0, 0.0, 0.0)
            rotation = (0.0, 0.0, 0.0, 0.0)
            pass
        elif command.tag == "setmkfluid":
            mk = ("fluid", command.attrib["mk"])
            pass
        elif command.tag == "setmkbound":
            mk = ("bound", command.attrib["mk"])
            pass
        elif command.tag == "setdrawmode":
            drawmode = command.attrib["mode"]
        elif command.tag == "move":
            movement = (float(command.attrib["x"]), float(command.attrib["y"]), float(command.attrib["z"]))
        elif command.tag == "rotate":
            rotation = (float(command.attrib["ang"]), float(command.attrib["x"]), float(command.attrib["y"]),
                        float(command.attrib["z"]))
        elif command.tag == "drawbox":
            for subcommand in command:
                point = (0.0, 0.0, 0.0)
                size = (1.0, 1.0, 1.0)
                if subcommand.tag == "boxfill":
                    pass
                elif subcommand.tag == "point":
                    point = (
                        float(subcommand.attrib["x"]), float(subcommand.attrib["y"]), float(subcommand.attrib["z"]))
                elif subcommand.tag == "size":
                    size = (float(subcommand.attrib["x"]), float(subcommand.attrib["y"]), float(subcommand.attrib["z"]))
                else:
                    utils.warning(
                        "Modifier unknown (" + subcommand.tag + ") for the command: " + command.tag + ". Ignoring...")
            # Box creation in FreeCAD
            FreeCAD.ActiveDocument.addObject("Part::Box", "Box" + str(elementnum))
            FreeCAD.ActiveDocument.ActiveObject.Label = "Box" + str(elementnum)
            # noinspection PyArgumentList
            FreeCAD.ActiveDocument.getObject("Box" + str(elementnum)).Placement = FreeCAD.Placement(
                FreeCAD.Vector((point[0] + movement[0]) * 1000, (point[1] + movement[1]) * 1000,
                               (point[2] + movement[2]) * 1000),
                FreeCAD.Rotation(FreeCAD.Vector(rotation[1], rotation[2], rotation[3]), rotation[0]))
            FreeCAD.ActiveDocument.getObject("Box" + str(elementnum)).Length = str(size[0]) + ' m'
            FreeCAD.ActiveDocument.getObject("Box" + str(elementnum)).Width = str(size[1]) + ' m'
            FreeCAD.ActiveDocument.getObject("Box" + str(elementnum)).Height = str(size[2]) + ' m'
            # Subscribe Box for creation in DSPH Objects
            # Structure: [name] = [mknumber, type, fill]
            to_add_dsph["Box" + str(elementnum)] = [int(mk[1]), mk[0], drawmode]
        elif command.tag == "drawcylinder":
            point = [0, 0, 0]
            top_point = [0, 0, 0]
            radius = float(command.attrib["radius"])
            points_found = 0
            for subcommand in command:
                if subcommand.tag == "point":
                    if points_found == 0:
                        point = [float(subcommand.attrib["x"]), float(subcommand.attrib["y"]),
                                 float(subcommand.attrib["z"])]
                    elif points_found == 1:
                        top_point = [float(subcommand.attrib["x"]), float(subcommand.attrib["y"]),
                                     float(subcommand.attrib["z"])]
                    else:
                        utils.warning("Found more than two points in a cylinder definition. Ignoring")
                    points_found += 1

            # Cylinder creation in FreeCAD
            FreeCAD.ActiveDocument.addObject("Part::Cylinder", "Cylinder" + str(elementnum))
            FreeCAD.ActiveDocument.ActiveObject.Label = "Cylinder" + str(elementnum)
            # noinspection PyArgumentList
            FreeCAD.ActiveDocument.getObject("Cylinder" + str(elementnum)).Placement = FreeCAD.Placement(
                FreeCAD.Vector((point[0] + movement[0]) * 1000, (point[1] + movement[1]) * 1000,
                               (point[2] + movement[2]) * 1000),
                FreeCAD.Rotation(FreeCAD.Vector(rotation[1], rotation[2], rotation[3]), rotation[0]))
            FreeCAD.ActiveDocument.getObject("Cylinder" + str(elementnum)).Radius = str(radius) + ' m'
            FreeCAD.ActiveDocument.getObject("Cylinder" + str(elementnum)).Height = (top_point[2] - point[2]) * 1000
            # Subscribe Cylinder for creation in DSPH Objects
            # Structure: [name] = [mknumber, type, fill]
            to_add_dsph["Cylinder" + str(elementnum)] = [int(mk[1]), mk[0], drawmode]
        elif command.tag == "drawsphere":
            point = [0, 0, 0]
            radius = float(command.attrib["radius"])
            for subcommand in command:
                if subcommand.tag == "point":
                    point = [float(subcommand.attrib["x"]), float(subcommand.attrib["y"]),
                             float(subcommand.attrib["z"])]
            # Sphere creation in FreeCAD
            FreeCAD.ActiveDocument.addObject("Part::Sphere", "Sphere" + str(elementnum))
            FreeCAD.ActiveDocument.ActiveObject.Label = "Sphere" + str(elementnum)
            # noinspection PyArgumentList
            FreeCAD.ActiveDocument.getObject("Sphere" + str(elementnum)).Placement = FreeCAD.Placement(
                FreeCAD.Vector((point[0] + movement[0]) * 1000, (point[1] + movement[1]) * 1000,
                               (point[2] + movement[2]) * 1000),
                FreeCAD.Rotation(FreeCAD.Vector(rotation[1], rotation[2], rotation[3]), rotation[0]))
            FreeCAD.ActiveDocument.getObject("Sphere" + str(elementnum)).Radius = str(radius) + ' m'
            # Subscribe Sphere for creation in DSPH Objects
            # Structure: [name] = [mknumber, type, fill]
            to_add_dsph["Sphere" + str(elementnum)] = [int(mk[1]), mk[0], drawmode]
        elif command.tag == "drawfilestl":
            # Imports the stl file as good as it can
            stl_path = path + "/" + command.attrib["file"]
            Mesh.insert(stl_path, "DSPH_Case")
            # TODO: Find a way to reference the imported mesh so it can be added to the simulation. For now it can't be.
            # toAddDSPH["STL" + str(elementnum)] = [int(mk[1]), mk[0], drawmode]
            pass
        else:
            # Command not supported, report and ignore
            utils.warning("The command: " + command.tag + " is not yet supported. Ignoring...")

        elementnum += 1

    FreeCAD.ActiveDocument.recompute()
    FreeCADGui.SendMsgToActiveView("ViewFit")
    return to_add_dsph
示例#50
0
    def execute(self, username, args, user_flags):
        warning("Scheduled command ({}) ran directly by {}, please use "
                "CommandScheduler".format(" ".join(args), username))
示例#51
0
    def __update_game(self, game_now):
        new_game = False

        # W/o installation wave cannot be determined on endless/weekly
        # TODO Refactor method
        if game_now.wave is not None:
            new_map = self.server.game.game_map.title != game_now.map_title
            wave_drop = game_now.wave < (self.server.game.wave or 0)
            wave_reset = self.server.game.wave is None or wave_drop

            if new_map or wave_reset:
                new_game = True
        else:
            # Pick up the transition between supported and unsupported modes
            new_type = self.server.game.game_type != game_now.game_type
            if new_type:
                message = ("Game type ({}) support not installed, please "
                           "patch your webadmin to correct this! Guidance is "
                           "available at: {}")
                warning(
                    message.format(game_now.game_type,
                                   colored(BANNER_URL, 'magenta')))

                # TODO end_game should be triggered

        # Trigger end-game before loading next map's info
        if new_game and self.server.game.game_map.title \
                != GAME_MAP_TITLE_UNKNOWN:
            if self.server.game.game_type == GAME_TYPE_SURVIVAL:
                survival_boss_defeat = self.__survival_boss_defeat()
                self.server.event_end_game(not survival_boss_defeat)
            else:
                self.server.event_end_game(False)

        if game_now.trader_open and not self.server.trader_time:
            self.server.event_wave_end()
            self.server.event_trader_open()
        if not game_now.trader_open and self.server.trader_time:
            self.server.event_trader_close()

        self.server.game.game_map.title = game_now.map_title
        self.server.game.game_map.name = game_now.map_name
        self.server.game.wave = game_now.wave
        self.server.game.length = game_now.length
        self.server.game.difficulty = game_now.difficulty
        self.server.game.zeds_dead = game_now.zeds_dead
        self.server.game.zeds_total = game_now.zeds_total
        self.server.game.game_type = game_now.game_type
        self.server.game.players_max = game_now.players_max

        if new_game and game_now.map_title != GAME_MAP_TITLE_UNKNOWN:
            self.server.event_new_game()

        # TODO something better, abstract tracker per mode, test INSTALLED
        if self.server.game.wave is not None:
            if not self.server.trader_time \
                    and 0 < self.server.game.wave <= self.server.game.length:
                now = time.time()
                self.server.game.time += now - self.game_timer
                self.game_timer = time.time()
            else:
                self.game_timer = time.time()

            if game_now.wave > self.previous_wave:
                self.server.event_wave_start()
                self.previous_wave = self.server.game.wave
示例#52
0
raw_dir = 'setup'
eudaq_dir = dirname(dirname(realpath(__file__)))
data_dir = join(dirname(eudaq_dir), 'data')
conf = 'desy'

# Configuration
config = ConfigParser()
config.read(join(eudaq_dir, 'scripts', 'config', '{}.ini'.format(conf)))

runs = glob(join(data_dir, 'run*.raw'))
try:
    run_file = max(runs) if args.run is None else next(
        run for run in runs
        if basename(run).startswith('run{:06d}'.format(args.run)))
except StopIteration:
    run_file = max(runs)
    warning('Did not find run {} --> taking the last one ({}).'.format(
        args.run, run_file))

warning('Starting Online Monitor for {}'.format(basename(run_file)))

conf_file = join(eudaq_dir, *loads(config.get('MISC', 'config file')))

prog = join(eudaq_dir, 'bin', config.get('MISC', 'online monitor'))
cmd = '{} -d {p} -c {c} -rf '.format(prog, p=run_file, c=conf_file)
print('{} -d {p} -c {c} -rf '.format(basename(prog),
                                     p=basename(run_file),
                                     c=basename(conf_file)))
print()
system(cmd)
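Note that loads(config.get('MISC', 'config file')) implies the ini file stores a JSON-encoded list of path components. A hedged illustration of the [MISC] section this script expects (key names taken from the code, values invented):

[MISC]
config file = ["conf", "desy.conf"]
online monitor = OnlineMon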
Example #53

def main():
    parser = argparse.ArgumentParser()

    parser.add_argument('--log_dir', default='logdir-tacotron')

    parser.add_argument('--data_paths', default='.\\data\\moon,.\\data\\son')

    parser.add_argument('--load_path',
                        default=None)  # takes precedence over 'initialize_path' below
    #parser.add_argument('--load_path', default='logdir-tacotron/moon+son_2018-12-25_19-03-21')

    parser.add_argument(
        '--initialize_path',
        default=None)  # restore the model from this checkpoint, but start the global step from 0

    parser.add_argument('--batch_size', type=int, default=16)
    parser.add_argument('--num_test_per_speaker', type=int, default=2)
    parser.add_argument('--random_seed', type=int, default=123)
    parser.add_argument('--summary_interval', type=int, default=100000)
    parser.add_argument('--test_interval', type=int, default=500)  # 500
    parser.add_argument('--checkpoint_interval', type=int,
                        default=2000)  # 2000
    parser.add_argument('--skip_path_filter',
                        type=str2bool,
                        default=False,
                        help='Use only for debugging')

    parser.add_argument('--slack_url',
                        help='Slack webhook URL to get periodic reports.')
    parser.add_argument(
        '--git',
        action='store_true',
        help='If set, verify that the client is clean.'
    )  # The store_true option automatically creates a default value of False.

    config = parser.parse_args()
    config.data_paths = config.data_paths.split(",")
    setattr(hparams, "num_speakers", len(config.data_paths))

    prepare_dirs(config, hparams)

    log_path = os.path.join(config.model_dir, 'train.log')
    infolog.init(log_path, config.model_dir, config.slack_url)

    tf.set_random_seed(config.random_seed)
    print(config.data_paths)

    if any("krbook" not in data_path for data_path in
           config.data_paths) and hparams.sample_rate != 20000:
        warning(
            "Detected a non-krbook dataset. You may need to change the "
            "sampling rate from {} to 20000".format(hparams.sample_rate))

    if any('LJ' in data_path for data_path in
           config.data_paths) and hparams.sample_rate != 22050:
        warning("Detect LJ Speech dataset. Set sampling rate from {} to 22050".
                format(hparams.sample_rate))

    if config.load_path is not None and config.initialize_path is not None:
        raise Exception(
            " [!] Only one of load_path and initialize_path should be set")

    train(config.model_dir, config)
Example #54
    def printwarnings(self):
        """Print any warnings accumulated throughout the constructor."""
        sys.stderr.write('\n')
        for msg in self.warnings:
            warning(msg)
        sys.stderr.write('\n')
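Most examples in this collection call a project-local warning helper rather than the standard warnings module. For orientation, a minimal sketch consistent with these call sites (an assumption; each project's actual utils implementation differs):

import sys

def warning(msg):
    # prefix the message and send it to stderr so it stays visible
    # even when stdout is redirected
    sys.stderr.write('WARNING: %s\n' % msg)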
Example #55
import utils as u
import vk_api as v
WAIT_TIME = u.WAIT_TIME

# Load myself and create dir
u.info("Загрузка текущего профиля")
me = v.get_user()
u.info("Текущий профиль:", v.gen_name(me))

done = u.done_read(me)
if done['uid'] == me['uid']:
    u.warning(
        "Found a save file for this profile.\nIf you want to start over, "
        "just delete the file %s." % done['filename'])
    dirname = v.gen_dir_name(me)  #v.gen_name(me) + '_' + str(me['uid'])
else:
    u.info("Создание директории")
    dirname = v.gen_dir_name(me)
    dirname = u.mklsdir(dirname)
    u.info("Текущая директория:", dirname)
    done['uid'] = me['uid']

u.info("Загрузка диалогов")
dialogs = v.get_dialogs()

u.info("Всего %d диалогов" % len(dialogs))
"Загрузка личных сообщений:"
for idx, dialog in enumerate(dialogs):
    u.sleep(WAIT_TIME)

    file = None
Example #56
def convert_vfr(ids,
                odsn,
                frmt,
                layers=[],
                overwrite=False,
                options=[],
                geom_name=None,
                mode=Mode.write,
                nogeomskip=True,
                userdata={}):
    odrv = ogr.GetDriverByName(frmt)
    if odrv is None:
        fatal("Format '%s' is not supported" % frmt)

    # try to open datasource
    ods = odrv.Open(odsn, True)
    if ods is None:
        # if fails, try to create new datasource
        ods = odrv.CreateDataSource(odsn)
    if ods is None:
        fatal("Unable to open or create new datasource '%s'" % odsn)

    create_geom = ods.TestCapability(ogr.ODsCCreateGeomFieldAfterCreateLayer)
    if not geom_name and not create_geom:
        warning("Driver '%s' doesn't support multiple geometry columns. "
                "Only first will be used." % odrv.GetName())

    # OVERWRITE is not supported by Esri Shapefile
    if overwrite:
        if frmt != 'ESRI Shapefile':
            options.append("OVERWRITE=YES")
        if mode == Mode.write:
            # delete also layers which are not part of ST_UKSH
            for layer in ("ulice", "parcely", "stavebniobjekty",
                          "adresnimista"):
                if ods.GetLayerByName(layer) is not None:
                    ods.DeleteLayer(layer)

    # process features marked for deletion first
    dlist = None  # statistics
    if mode == Mode.change:
        dlayer = ids.GetLayerByName('ZaniklePrvky')
        if dlayer:
            dlist = process_deleted_features(dlayer, ods, layers)

    # process layers
    start = time.time()
    nlayers = ids.GetLayerCount()
    nfeat = 0
    for iLayer in range(nlayers):
        layer = ids.GetLayer(iLayer)
        layer_name = layer.GetName()
        ### force lower case for output layers; some drivers do
        ### that automatically anyway
        layer_name_lower = layer_name.lower()

        if layers and layer_name not in layers:
            # process only selected layers
            continue

        if layer_name == 'ZaniklePrvky':
            # skip deleted features (already done)
            continue

        olayer = ods.GetLayerByName('%s' % layer_name_lower)
        sys.stdout.write("Processing layer %-20s ..." % layer_name)
        if not overwrite and (olayer and mode == Mode.write):
            sys.stdout.write(
                " already exists (use --overwrite or --append to modify existing data)\n"
            )
            continue

        ### TODO: fix output drivers not to use default geometry
        ### names
        if frmt in ('PostgreSQL', 'OCI') and not geom_name:
            if layer_name_lower == 'ulice':
                remove_option(options, 'GEOMETRY_NAME')
                options.append('GEOMETRY_NAME=definicnicara')
            else:
                remove_option(options, 'GEOMETRY_NAME')
                options.append('GEOMETRY_NAME=definicnibod')

        # delete layer if exists and append is not True
        if olayer and mode == Mode.write:
            if delete_layer(ids, ods, layer_name_lower):
                olayer = None

        # create new output layer if not exists
        if not olayer:
            olayer = create_layer(ods, layer, layer_name_lower, geom_name,
                                  create_geom, options)
        if olayer is None:
            fatal("Unable to export layer '%s'. Exiting..." % layer_name)

        # pre-process changes
        if mode == Mode.change:
            change_list = process_changes(layer, olayer)
            if dlist and layer_name in dlist:  # add features to be deleted
                change_list.update(dlist[layer_name])

        ifeat = n_nogeom = 0
        geom_idx = -1

        # make sure that the PG sequence is up-to-date (important for fid == -1)
        fid = -1
        if 'pgconn' in userdata:
            fid = get_fid_max(userdata['pgconn'], layer_name_lower)
            if fid > 0:
                update_fid_seq(userdata['pgconn'], layer_name_lower, fid)
        if fid is None or fid == -1:
            fid = olayer.GetFeatureCount()

        # start transaction in output layer
        if olayer.TestCapability(ogr.OLCTransactions):
            olayer.StartTransaction()

        # delete marked features first (changes only)
        if mode == Mode.change and dlist and layer_name in dlist:
            for fid in dlist[layer_name].keys():
                olayer.DeleteFeature(fid)

        # do mapping for fields (needed for Esri Shapefile when
        # field names are truncated)
        field_map = [i for i in range(0, layer.GetLayerDefn().GetFieldCount())]

        # copy features from source to destination layer
        layer.ResetReading()
        feature = layer.GetNextFeature()
        while feature:
            # check for changes first (delete/update/add)
            if mode == Mode.change:
                c_fid = feature.GetFID()
                action, o_fid = change_list.get(c_fid, (None, None))
                if action is None:
                    fatal("Layer %s: unable to find feature %d" %
                          (layer_name, c_fid))

                # feature marked to be changed (delete first)
                if action in (Action.delete, Action.update):
                    olayer.DeleteFeature(o_fid)

                # determine fid for new feature
                if action == Action.add:
                    fid = -1
                else:
                    fid = o_fid

                if action == Action.delete:
                    # do nothing and continue
                    feature = layer.GetNextFeature()
                    ifeat += 1
                    continue
            else:
                fid += 1

            # clone feature
            ### ofeature = feature.Clone() # replace by SetFrom()
            ofeature = ogr.Feature(olayer.GetLayerDefn())
            ofeature.SetFromWithMap(feature, True, field_map)

            # modify geometry columns if requested
            if geom_name:
                if geom_idx < 0:
                    geom_idx = feature.GetGeomFieldIndex(geom_name)

                    # delete remaining geometry columns
                    ### not needed - see SetFrom()
                    ### odefn = ofeature.GetDefnRef()
                    ### for i in range(odefn.GetGeomFieldCount()):
                    ###    if i == geom_idx:
                    ###        continue
                    ###    odefn.DeleteGeomFieldDefn(i)

                modify_feature(feature, geom_idx, ofeature)

            if ofeature.GetGeometryRef() is None:
                n_nogeom += 1
                if nogeomskip:
                    # skip feature without geometry
                    feature = layer.GetNextFeature()
                    ofeature.Destroy()
                    continue

            # set feature id
            if fid >= -1:
                # fid == -1 -> unknown fid
                ofeature.SetFID(fid)

            # add new feature to output layer
            olayer.CreateFeature(ofeature)

            feature = layer.GetNextFeature()
            ifeat += 1

        # commit transaction in output layer
        if olayer.TestCapability(ogr.OLCTransactions):
            olayer.CommitTransaction()

        # print statistics per layer
        sys.stdout.write(" %10d features" % ifeat)
        if mode == Mode.change:
            n_added = n_updated = n_deleted = 0
            for action, unused in change_list.values():
                if action == Action.update:
                    n_updated += 1
                elif action == Action.add:
                    n_added += 1
                else:  # Action.delete:
                    n_deleted += 1
            sys.stdout.write(" (%5d added, %5d updated, %5d deleted)" % \
                                 (n_added, n_updated, n_deleted))
        else:
            sys.stdout.write(" added")
            if n_nogeom > 0:
                if nogeomskip:
                    sys.stdout.write(" (%d without geometry skipped)" %
                                     n_nogeom)
                else:
                    sys.stdout.write(" (%d without geometry)" % n_nogeom)
        sys.stdout.write("\n")

        nfeat += ifeat

        # update sequence for PG
        if 'pgconn' in userdata:
            ### fid = get_fid_max(userdata['pgconn'], layer_name_lower)
            if fid > 0:
                update_fid_seq(userdata['pgconn'], layer_name_lower, fid)

    # close output datasource
    ods.Destroy()

    # final statistics (time elapsed)
    message("Time elapsed: %d sec" % (time.time() - start))

    return nfeat
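A hedged usage sketch for the converter above, assuming the downloaded VFR exchange file can be opened directly with OGR (the file name and connection string are illustrative):

from osgeo import ogr

# open the downloaded VFR exchange file as the input datasource
ids = ogr.Open('20210331_OB_UKSH.xml.gz')
if ids is None:
    fatal("Unable to open input file")

# export all layers into a PostGIS database, overwriting existing data
nfeat = convert_vfr(ids, odsn='PG:dbname=ruian', frmt='PostgreSQL',
                    overwrite=True, mode=Mode.write)
message("%d features converted" % nfeat)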
Example #57
        buf = ''
        while True:
            ch = getch()

            if not isinstance(ch, str):
                ch = ch.decode()

            if ch in ['\n', '\r', '\r\n', '\n\r']:
                print('')
                break
            else:
                buf += str(ch)
                print('*', end='', flush=True)
        return buf
else:
    warning("getch unavailable")
if not sys.stdin.isatty():
    warning("Bad terminal")

_ = gettext.gettext

CONFIG_PATH = find_data_file("conf/magicked_admin.conf")
CONFIG_PATH_DISPLAY = "conf/magicked_admin.conf"

SETTINGS_DEFAULT = {
    'server_name': 'server_one',
    # address = 127.0.0.1:8080
    # username = Admin
    # password = 123
    'game_password': '******',
    'refresh_rate': '1'
Example #58
    else:
        utils.info('Image {} does not exist; queuing it for download'.format(path))
        need_download.append((entity, path))

sess = requests.Session()
sess.proxies = conf.proxies
sess.headers.update({
    'user-agent': conf.ua,
    'cookie': conf.cookie_string,
    'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
    'origin': 'https://www.fanbox.cc'
})
for i, (entity, path) in enumerate(need_download):
    if i > 0:
        time.sleep(conf.wait_sec + random.randint(5, 15))
    try:
        utils.info('Downloading image {} ({}/{})'.format(path, i+1, len(need_download)))
        res = sess.get(entity.image_url, headers={'referer': entity.page_url})
        if res.ok:
            with open(path, 'wb') as target:
                target.write(res.content)
            succ_download += 1
            utils.info('Downloaded successfully')
        else:
            fail_download += 1
            utils.warning('Request failed')
    except Exception:
        fail_download += 1
        utils.warning('Request raised an exception')
print('Downloaded {} new images; {} failed; {} images already existed'.format(succ_download, fail_download, touched))
Example #59
File: infer.py  Project: P79N6A/piglab
def infer(img_file='', model_path='./'):
    if img_file == '':
        opts, args = getopt.getopt(sys.argv[1:], "p:", ["file_name="])
        if len(args) == 0:
            print(
                "usage:  python infer.py [file_name]  \n\tpython infer.py infer_62.jpeg"
            )
            return 1, 'file_name is empty', {}
        file_name = args[0]
        img_file = './data/image/' + file_name
    else:
        file_name = img_file.split('/')[-1]

    # whether to use the GPU
    use_cuda = False  # set to True if training with GPU
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()

    params_dirname = model_path + "/model/"  # directory where model parameters are saved
    height = width = 28

    # load the test data
    imgs = []  # use several different preprocessing methods
    imgs_weight = [1, 0.99, 0.99]  # result weights for the different preprocessing methods
    try:  # test-set image
        imgs.append(utils.load_image(img_file, height, width))
    except:
        utils.warning(utils.get_trace())
        imgs.append([])
    try:  # photo of handwriting on white paper
        if len(file_name.split('_')[1].split('.')[0]) >= 2 and int(
                file_name.split('_')[1][1:2]) > 0:
            imgs_weight[1] = 5
        imgs.append(
            utils.load_image(img_file,
                             height,
                             width,
                             rotate=0,
                             sobel=True,
                             save_resize=True,
                             ksize=5,
                             dilate=1))
    except:
        utils.warning(utils.get_trace(), 'infer')
        imgs.append([])
    try:  # photo written with a thick pen on black paper
        imgs.append(
            utils.load_image(img_file,
                             height,
                             width,
                             rotate=0,
                             sobel=True,
                             save_resize=True,
                             ksize=3,
                             dilate=6,
                             erode=1))
    except:
        utils.warning(utils.get_trace(), 'infer')
        imgs.append([])

    # predict using the saved model parameters and the test images
    inferencer = Inferencer(
        # infer_func=softmax_regression, # uncomment for softmax regression
        # infer_func=multilayer_perceptron, # uncomment for MLP
        infer_func=utils.convolutional_neural_network,  # uncomment for LeNet5
        param_path=params_dirname,
        place=place)

    results = []
    results_sum = numpy.ndarray([])
    numpy.set_printoptions(precision=2)
    for i in range(len(imgs)):
        if len(imgs[i]) == 0: continue
        result = inferencer.infer({'img': imgs[i]})  # per-label probabilities for this input
        result = numpy.where(result[0][0] > 0.01, result[0][0],
                             0)  # zero out probabilities below 1%
        print(result)
        results.append(result)
        print(numpy.argsort(result))
        results_sum = results_sum + result * imgs_weight[i]  # accumulate weighted label probabilities
    #print(imgs_weight)
    # sort labels by summed probability
    lab = numpy.argsort(
        results_sum)  # probs and lab are the results of one batch data
    label = lab[-1]  # last entry of the ascending sort = most probable label
    weight = []
    for result in results:
        if numpy.argsort(result)[-1] == label:
            weight = result
    print("*label weight sort:")
    print(results_sum)
    print(lab)
    print("*img: %s" % img_file)
    print("*label: %d weight: %f" % (label, weight[label]))
    return 0, '', {
        'img': img_file,
        'label': label,
        'weight': list(weight.astype(str))
    }
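A hedged invocation example for the function above (the image path is illustrative and assumes the data layout used in the code):

# run inference on a single image and report the most probable label
status, msg, result = infer('./data/image/infer_62.jpeg')
if status == 0:
    print("predicted label:", result['label'])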
Example #60
def create_description_tex_file(problem):
    utils.fill_template(utils.Templates.TeX.problem(), problem.tex_file)
    utils.warning([
        'Do not forget to fill in the problem description:',
        '    ' + problem.tex_file
    ])