Code example #1
def diff_structure(st_keys, st_one, st_two):
    st_new = {}
    for i in st_one.keys():
        if i in st_keys:
            if i not in st_two:
                st_new[i] = st_one[i]
            elif st_keys[i] == "string" and str(st_one[i]) != str(st_two[i]):
                st_new[i] = st_one[i]
            elif st_keys[i] == "int" and int(st_one[i]) != int(st_two[i]):
                st_new[i] = st_one[i]
            elif st_keys[i] == "long" and long(st_one[i]) != long(st_two[i]):
                st_new[i] = st_one[i]
            elif st_keys[i] == "array":
                a_st_one = st_one[i] if isinstance(st_one[i], list) else eval(st_one[i])
                b_st_one = st_two[i] if isinstance(st_two[i], list) else eval(st_two[i])
                em_news = list(set(a_st_one).difference(set(b_st_one)))
                em_deleted = list(set(b_st_one).difference(set(a_st_one)))
                if len(em_news) > 0 or len(em_deleted) > 0:
                    st_new[i] = a_st_one
            else:
                pass
    if len(st_new.keys()) > 0:
        return st_new
    else:
        return None
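A minimal usage sketch for diff_structure (hypothetical data, assuming the type-tag convention seen above, where st_keys maps each field name to "string", "int", "long" or "array"):

st_keys = {"name": "string", "count": "int", "tags": "array"}
st_one = {"name": "alpha", "count": "3", "tags": "['a', 'b']"}
st_two = {"name": "alpha", "count": 4, "tags": ["a"]}

# Only the fields that differ are returned, taken from st_one.
print(diff_structure(st_keys, st_one, st_two))
# -> {'count': '3', 'tags': ['a', 'b']}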
Code example #2
File: layout.py  Project: cashlalala/AudioBox
 def setAttachedProperty(self, prop, **argd):
     global layout_dic
     name = prop['name'].split('.')[-1].lower()
     data = prop['data']
     child = prop['child']
     if name == 'dock_side':
         data = data.strip()
         result = eval(data, layout_dic)            
         self.dock_side(child, result)
     elif name == 'dock':
         docks = data.split(',')
         dock_cmd = {}
         for d in docks:
             dock, region = d.split(':')
             dock = dock.strip().lower()
             try:
                 dock = RegionLayoutItem.keymap[dock]
             except:
                 print '[layout.py] unknown dock name(%s) !!!' %dock
             try:
                 region = eval(region.strip())
             except:
                 if argd.get('parser', None):
                     region = argd['parser'].queryMacroValue(region.strip())
                 else:
                     region = eval(region.strip(), argd['macro'])
             dock_cmd[dock] = region
         self.dock(child, dock_cmd)
Code example #3
File: _composites_.py  Project: emayssat/epics-opis
 def repeatLWidgetsSignals(self, signals = None):
     super(LExecutableComposite, self).repeatLWidgetsSignals()
     if signals is None:
         signals = [ 'processStarted', 'processFinished', 'processResumed', 'processPaused' ]
     for widget in self.getLWidgets():
         for signal in signals:
             eval("widget.%s.connect(self.%s.emit)" % (signal, signal))
Code example #4
File: fgShellRain.py  Project: javidiaz/rain
 def help_rainlaunch(self):
     msg = (
         "Rain launch command: Run a command in the requested OS or enter in Interactive mode. The requested OS can be already registered in the requested "
         + " infrastructure or stored in the Image Repository. The latter implies to register the image in the requested infrastructure"
     )
     self.print_man("launch ", msg)
     eval('self.do_rainlaunch("-h")')
Code example #5
File: question2.py  Project: MrHamdulay/csc3-capstone
def vend():
    """Simulate a vending machine, taking user input and returning remainder."""
    total = eval(input("Enter the cost (in cents):\n"))
    inserted = 0
    while inserted < total:
        inserted += eval(input("Deposit a coin or note (in cents):\n"))
    if inserted > total:
        sum = inserted - total
        if sum != 0:
            print("Your change is:")
        dollars = sum//100
        if dollars != 0:
            print(dollars,'x $1')
        quarters = (sum - dollars*100)//25
        if quarters != 0:
            print(quarters,'x 25c')
        ten_cents = (sum - dollars*100 - quarters*25)//10
        if ten_cents != 0:
            print(ten_cents,'x 10c')
        five_cents = (sum - dollars*100 - quarters*25 - ten_cents*10)//5
        if five_cents != 0:
            print(five_cents,'x 5c')
        one_cents = (sum - dollars*100 - quarters*25 - ten_cents*10 - five_cents*5)//1
        if one_cents != 0:
            print(one_cents,'x 1c')
Code example #6
File: render.py  Project: a-r-williamson/pycbc
def setup_template_render(path, config_path):
    """ This function is the gateway for rendering a template for a file.
    """

    # initialization
    cp = get_embedded_config(path)
    output = ''
    filename = os.path.basename(path)

    # use meta-data if not empty for rendering
    if cp.has_option(filename, 'render-function'):
        render_function_name = cp.get(filename, 'render-function')
        render_function = eval(render_function_name)
        output = render_function(path, cp)

    # read configuration file for rendering
    elif os.path.exists(config_path):
        cp.read(config_path)

        # render template
        if cp.has_option(filename, 'render-function'):
            render_function_name = cp.get(filename, 'render-function')
            render_function = eval(render_function_name)
            output = render_function(path, cp)
        else:
            output = render_default(path, cp)

    # if no configuration file is present
    # then render the default template
    else:
        output = render_default(path, cp)

    return output
Code example #7
File: metrics.py  Project: lowks/pyProCT
def filterRecords(expression,records):
	## Format the string
	tags = ["not","and","or",">","<",">=","<=","==","+","-","(",")"]
	expression = expression.lower()
	for t in tags:
		expression = expression.replace(t," "+t+" ")

	## Get all keys from records
	all_keys = set([])
	for r in records:
		all_keys = all_keys | set (r.keys())

	## Identify metrics in expression
	words = expression.split()
	for w in all_keys:
		if w in words:
			expression = expression.replace(w,"r[\""+w+"\"]")

	## Delete SEQRES records
	preselection = []
	for r in records:
		if  not "seqres" in r.keys():
			preselection.append(r)

	selection = []
	for r in preselection:
		if eval(expression):
			selection.append(r)
		print(eval(expression))

	return selection
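A minimal usage sketch with hypothetical metric names: each key found in the expression is rewritten to a lookup on the current record r, and the rewritten expression is eval'd once per record (the print inside the loop also echoes each True/False):

records = [{"rmsd": 1.2, "energy": -4.0}, {"rmsd": 3.5, "energy": -1.0}]
print(filterRecords("rmsd < 2.0", records))
# -> [{'rmsd': 1.2, 'energy': -4.0}]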
Code example #8
File: generate_wiki_docs.py  Project: valrus/mingus3
def generate_package_wikidocs(package_string, file_prefix='ref',
                              file_suffix='.wiki'):
    d = Documize()
    package = eval(package_string)
    print('''

Generating documentation for package %s''' % package_string)
    for element in dir(package):
        if not isinstance(element, collections.Callable):
            fullname = '%s.%s' % (package_string, element)
            if type(eval(fullname)) == types.ModuleType or type(eval(fullname))\
                 == type:
                d.set_module(fullname)
                wikiname = file_prefix
                for parts in fullname.split('.'):
                    wikiname += parts.capitalize()
                wikiname += file_suffix
                print('Writing %s...' % wikiname, end=' ')
                result = d.output_wiki()
                try:
                    f = open(os.path.join(sys.argv[1], wikiname), 'w')
                    try:
                        f.write(result)
                        print('OK')
                    except:
                        print("ERROR. Couldn't write to file.")
                    f.close()
                except:
                    print("ERROR. Couldn't open file for writing.")
Code example #9
 def make_seq(items, part):
     if items is nil:
         return nil
     elif eval("items.first.{0}".format(part)) is nil:
         return nil
     else:
         return Pair(eval("items.first.{0}".format(part)), make_seq(items.second, part))
Code example #10
def _install_handlers(cp, formatters):
    """Install and return handlers"""
    hlist = cp["handlers"]["keys"]
    if not len(hlist):
        return {}
    hlist = hlist.split(",")
    hlist = _strip_spaces(hlist)
    handlers = {}
    fixups = [] #for inter-handler references
    for hand in hlist:
        section = cp["handler_%s" % hand]
        klass = section["class"]
        fmt = section.get("formatter", "")
        try:
            klass = eval(klass, vars(logging))
        except (AttributeError, NameError):
            klass = _resolve(klass)
        args = section["args"]
        args = eval(args, vars(logging))
        h = klass(*args)
        if "level" in section:
            level = section["level"]
            h.setLevel(logging._levelNames[level])
        if len(fmt):
            h.setFormatter(formatters[fmt])
        if issubclass(klass, logging.handlers.MemoryHandler):
            target = section.get("target", "")
            if len(target): #the target handler may not be loaded yet, so keep for later...
                fixups.append((h, target))
        handlers[hand] = h
    #now all handlers are loaded, fixup inter-handler references...
    for h, t in fixups:
        h.setTarget(handlers[t])
    return handlers
Code example #11
    def process_report(self, node):
        values = {}
        for dest, f in (('name','string'), ('model','model'), ('report_name','name')):
            values[dest] = getattr(node, f)
            assert values[dest], "Attribute %s of report is empty !" % (f,)
        for field,dest in (('rml','report_rml'),('file','report_rml'),('xml','report_xml'),('xsl','report_xsl'),('attachment','attachment'),('attachment_use','attachment_use')):
            if getattr(node, field):
                values[dest] = getattr(node, field)
        if node.auto:
            values['auto'] = eval(node.auto)
        if node.sxw:
            sxw_file = misc.file_open(node.sxw)
            try:
                sxw_content = sxw_file.read()
                values['report_sxw_content'] = sxw_content
            finally:
                sxw_file.close()
        if node.header:
            values['header'] = eval(node.header)
        values['multi'] = node.multi and eval(node.multi)
        xml_id = node.id
        self.validate_xml_id(xml_id)

        self._set_group_values(node, values)

        id = self.pool.get('ir.model.data')._update(self.cr, SUPERUSER_ID, "ir.actions.report.xml", \
                self.module, values, xml_id, noupdate=self.isnoupdate(node), mode=self.mode)
        self.id_map[xml_id] = int(id)

        if not node.menu or eval(node.menu):
            keyword = node.keyword or 'client_print_multi'
            value = 'ir.actions.report.xml,%s' % id
            replace = node.replace or True
            self.pool.get('ir.model.data').ir_set(self.cr, SUPERUSER_ID, 'action', \
                    keyword, values['name'], [values['model']], value, replace=replace, isobject=True, xml_id=xml_id)
Code example #12
File: gdal_calc.py  Project: cbajema/Edgar
def main():

    usage = """
gdal_calc.py [-A <filename>] [--A_band] [-B...-Z filename]  [--calc <calculation>] [--format] [--outfile output_file] [--type data_type] [--NoDataValue] [--overwrite]
    """

    parser = OptionParser(usage)

    # define options
    parser.add_option("--calc", dest="calc", help="calculation in gdalnumeric syntax using +-/* or any numpy array functions (i.e. logical_and())")
    # hack to limit the number of input file options close to required number
    for myAlpha in AlphaList[0:len(sys.argv)-1]:
        eval('parser.add_option("-%s", dest="%s", help="input gdal raster file, note you can use any letter A-Z")' %(myAlpha, myAlpha))
        eval('parser.add_option("--%s_band", dest="%s_band", default=0, type=int, help="number of raster band for file %s")' %(myAlpha, myAlpha, myAlpha))

    parser.add_option("--outfile", dest="outF", default='gdal_calc.tif', help="output file to generate or fill.")
    parser.add_option("--NoDataValue", dest="NoDataValue", type=float, help="set output nodatavalue (Defaults to datatype specific values)")
    parser.add_option("--type", dest="type", help="set datatype must be one of %s" % list(DefaultNDVLookup.keys()))
    parser.add_option("--format", dest="format", default="GTiff", help="GDAL format for output file (default 'GTiff')")
    parser.add_option("--overwrite", dest="overwrite", action="store_true", help="overwrite output file if it already exists")
    parser.add_option("--debug", dest="debug", action="store_true", help="print debugging information")

    (opts, args) = parser.parse_args()

    if len(sys.argv) == 1:
        print(usage)
    elif not opts.calc:
        print("No calculation provided.  Nothing to do!")
        print(usage)
    else:
        doit(opts, args)
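For reference, each pass through the loop above expands the two eval'd strings into ordinary add_option calls; for myAlpha == 'A' they are equivalent to:

# parser.add_option("-A", dest="A", help="input gdal raster file, note you can use any letter A-Z")
# parser.add_option("--A_band", dest="A_band", default=0, type=int, help="number of raster band for file A")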
Code example #13
File: train.py  Project: SigmaQuan/NMT-Coverage
def main():
    args = parse_args()

    state = getattr(experiments.nmt, args.proto)()
    if args.state:
        if args.state.endswith(".py"):
            state.update(eval(open(args.state).read()))
        else:
            with open(args.state) as src:
                state.update(cPickle.load(src))
    for change in args.changes:
        state.update(eval("dict({})".format(change)))

    logging.basicConfig(level=getattr(logging, state['level']), format="%(asctime)s: %(name)s: %(levelname)s: %(message)s")
    logger.debug("State:\n{}".format(pprint.pformat(state)))

    rng = numpy.random.RandomState(state['seed'])
    enc_dec = RNNEncoderDecoder(state, rng, skip_init=args.skip_init, compute_alignment=True)
    enc_dec.build()
    lm_model = enc_dec.create_lm_model()

    logger.debug("Load data")
    train_data = get_batch_iterator(state)
    logger.debug("Compile trainer")
    algo = eval(state['algo'])(lm_model, state, train_data)
    logger.debug("Run training")
    main = MainLoop(train_data, None, None, lm_model, algo, state, None,
            reset=state['reset'],
            hooks=[RandomSamplePrinter(state, lm_model, train_data)]
                if state['hookFreq'] >= 0
                else None)
    if state['reload']:
        main.load()
    if state['loopIters'] > 0:
        main.main()
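Each --changes value is a comma-separated list of keyword assignments that gets wrapped in dict(...) and eval'd into a state update; a small illustration with hypothetical keys:

change = "bs=80,seqlen=50"
print(eval("dict({})".format(change)))
# -> {'bs': 80, 'seqlen': 50}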
Code example #14
 def _eval_params(self, model, params):
     args = []
     for i, param in enumerate(params):
         if isinstance(param, types.ListType):
             value = self._eval_params(model, param)
         elif is_ref(param):
             value = self.process_ref(param)
         elif is_eval(param):
             value = self.process_eval(param)
         elif isinstance(param, types.DictionaryType): # supports XML syntax
             param_model = self.get_model(param.get('model', model))
             if 'search' in param:
                 q = eval(param['search'], self.eval_context)
                 ids = param_model.search(self.cr, self.uid, q)
                 value = self._get_first_result(ids)
             elif 'eval' in param:
                 local_context = {'obj': lambda x: param_model.browse(self.cr, self.uid, x, self.context)}
                 local_context.update(self.id_map)
                 value = eval(param['eval'], self.eval_context, local_context)
             else:
                 raise YamlImportException('You must provide either a !ref or at least a "eval" or a "search" to function parameter #%d.' % i)
         else:
             value = param # scalar value
         args.append(value)
     return args
Code example #15
File: analysis.py  Project: AlistairMills/mantid
def plot_results_with_slope(results, x_field, y_field, x_scale=1):
    """ Function to plot Y vs X of anything. It accesses the members of "results" to plot them.
    other_field is used to separate by another field, and make separate line plots for each
    
    @param x_scale :: multiply x by this amount
    """
    figure()
    
    data = []
    for par in results:   
        x = eval('par.%s' % x_field)      
        y = eval('par.%s' % y_field)
        data.append( (x,y) )      
    data.sort()
    xs = [x*x_scale for (x,y) in data]
    ys = [y for (x,y) in data]
    
    # Now get the slope 
    gradient, intercept, r_value, p_value, std_err = stats.linregress(xs,ys)
    
    p = plot(xs,ys, marker='.', label="y = %.3gx + %.3g" % (gradient, intercept))
            
    title("%s vs %s" % (y_field, x_field));
    xlabel("%s x %s" % (x_field, x_scale) )
    ylabel(y_field)
    legend(loc='best')
    savefig("%s_vs_%s.png" % (y_field, x_field));
Code example #16
File: plural.py  Project: nickretallack/babel
def to_python(rule):
    """Convert a list/dict of rules or a `PluralRule` object into a regular
    Python function.  This is useful in situations where you need a real
    function and don't care about the actual rule object:

    >>> func = to_python({'one': 'n is 1', 'few': 'n in 2..4'})
    >>> func(1)
    'one'
    >>> func(3)
    'few'
    >>> func = to_python({'one': 'n in 1,11', 'few': 'n in 3..10,13..19'})
    >>> func(11)
    'one'
    >>> func(15)
    'few'

    :param rule: the rules as list or dict, or a `PluralRule` object
    :return: a corresponding Python function
    :raise RuleError: if the expression is malformed
    """
    namespace = {
        'IN':       in_range_list,
        'WITHIN':   within_range_list,
        'MOD':      cldr_modulo
    }
    to_python = _PythonCompiler().compile
    result = ['def evaluate(n):']
    for tag, ast in PluralRule.parse(rule).abstract:
        # the str() call is to coerce the tag to the native string.  It's
        # a limited ascii restricted set of tags anyways so that is fine.
        result.append(' if (%s): return %r' % (to_python(ast), str(tag)))
    result.append(' return %r' % _fallback_tag)
    code = compile('\n'.join(result), '<rule>', 'exec')
    eval(code, namespace)
    return namespace['evaluate']
Code example #17
File: Ace.py  Project: faircloth-lab/msatcommander-gs
 def bs(self,line):
     header=line.split()
     bsdata=bs()
     bsdata.padded_start=eval(header[1])
     bsdata.padded_end=eval(header[2])
     bsdata.name=header[3]
     self.data.bs.append(bsdata)
Code example #18
File: analysis.py  Project: AlistairMills/mantid
def plot_results_vs_other(results, x_field, y_field, other_field, extra_title=""):
    """ Function to plot Y vs X of anything. It accesses the members of "results" to plot them.
    other_field is used to separate by another field, and make separate line plots for each"""
    others = set()
    for par in results:
        others.add( eval('par.%s' % other_field) )
    others = list(others)
    others.sort()
        
    figure()
    
    for other in others:
        data = []
        for par in results:   
            this_other = eval('par.%s' % other_field)
            if this_other == other: 
                x = eval('par.%s' % x_field)      
                y = eval('par.%s' % y_field)
                data.append( (x,y) )      
        data.sort()
        xs = [x for (x,y) in data]
        ys = [y for (x,y) in data]
        p = plot(xs,ys, marker='.', label="%s = %f" % (other_field, other))
        
    if extra_title != "": extra_title = "\n" + extra_title
    title("%s vs %s%s" % (y_field, x_field, extra_title) );
    xlabel(x_field)
    ylabel(y_field)
    legend(loc='best')
    savefig("%s_vs_%s.png" % (y_field, x_field));
Code example #19
File: __init__.py  Project: ckrisgarrett/closures-2d
def platform_module(name = platform_default()):
    """Return the imported module for the platform.

    This looks for a module name that matches the specified argument.
    If the name is unspecified, we fetch the appropriate default for
    our execution environment.
    """
    full_name = 'SCons.Platform.' + name
    if full_name not in sys.modules:
        if os.name == 'java':
            eval(full_name)
        else:
            try:
                file, path, desc = imp.find_module(name,
                                        sys.modules['SCons.Platform'].__path__)
                try:
                    mod = imp.load_module(full_name, file, path, desc)
                finally:
                    if file:
                        file.close()
            except ImportError:
                try:
                    import zipimport
                    importer = zipimport.zipimporter( sys.modules['SCons.Platform'].__path__[0] )
                    mod = importer.load_module(full_name)
                except ImportError:
                    raise SCons.Errors.UserError("No platform named '%s'" % name)
            setattr(SCons.Platform, name, mod)
    return sys.modules[full_name]
Code example #20
File: Ace.py  Project: faircloth-lab/msatcommander-gs
 def co_header(self,line):
     header=line.split()
     self.data.name=header[1]
     self.data.nbases=eval(header[2])
     self.data.nreads=eval(header[3])
     self.data.nsegments=eval(header[4])
     self.data.uorc=header[5]
Code example #21
File: question2.py  Project: MrHamdulay/csc3-capstone
def vending_machine():
    deposit = 0 
    #get the cost of the item(s) purchased
    cost = eval(input('Enter the cost (in cents):\n'))
    #ask for money to pay when cost is greater than 0 and ask for more deposit when not enough
    while deposit < cost:
        deposit += eval(input('Deposit a coin or note (in cents):\n'))
    change = deposit - cost
    #Give change when due
    if change > 0:
        print('Your change is:')
        
        for i in (100, 25, 10, 5, 1):
            #check decreasingly if one of the possible coin is part of the change
            if change >= i:
                #specify for change more than or equal to $1 as in dollar               
                if i == 100:
                    print(change//i, ' x ', '$1',sep = '')
                else:
                    print(change//i, ' x ', i,'c',sep = '')
                change -= (change//i)*i
                #check until change is 0, then break loop
                if change == 0:
                    break
            else:
                continue
Code example #22
def getScoreOfExample(rule, dataSetExample):
    # print "dataSetExample:", dataSetExample
    # create variables that are the features of the example
    for feature in featuresInDataSet:
        if dataSetExample[feature] == SAME or dataSetExample[feature] == DIFFERENT:
            vars()[feature] = dataSetExample[feature]
        else:
            vars()[feature] = int(dataSetExample[feature])
        # FIVE ####vars()[feature] = dataSetExample[feature]
    # eval preCondition and condition to know what score to choose
    score = 0
    for sNodeAndItsAssociatedPNodes in rule:
        # print "Current Rule:", sNodeAndItsAssociatedPNodes
        # print "before score:", score
        if eval(sNodeAndItsAssociatedPNodes.preCondition):
            if eval(sNodeAndItsAssociatedPNodes.condition):
                # print("going to alpha1")
                score += sNodeAndItsAssociatedPNodes.alpha1
            else:
                # print("going to alpha2")
                score += sNodeAndItsAssociatedPNodes.alpha2
        else:
            score += 0
        # print "after score:", score
    return score
Code example #23
 def processAutoComp(self, aco):
     """ Processes an autocomp request using an AutoCompObject instance. 
     """
     
     # Try using buffer first
     if aco.tryUsingBuffer():
         return
     
     # Include builtins?
     if not aco.name:
         command = "__builtins__.keys()"
         try:
             names = eval(command, {}, self._interpreter.locals)
             aco.addNames(names)
         except Exception:
             pass
     
     # Query list of names
     command = "dir({})".format(aco.name)
     try:
         names = eval(command, {}, self._interpreter.locals)
         aco.addNames(names)
     except Exception:
         pass
     
     # Done
     aco.finish()
Code example #24
File: Tests.py  Project: johnglover/libsms
 def test_sms_fillHeader(self):
     "sms_fillHeader copies data from an SMS_AnalParams to an SMS_Header"
     data_fields = ["nFrames", "iFormat", "iFrameRate", "iStochasticType", \
                    "nTracks", "iSamplingRate", "nStochasticCoeff"]
     sms_fillHeader(self.sms_header, self.analysis_params, "")
     for field in data_fields:
         self.assert_(eval("self.sms_header."+field) == eval("self.analysis_params."+field))
Code example #25
File: personal_apoyo.py  Project: knathis/coldeportes
def ejecutar_casos_recursivos(consultas,departamentos,genero,tipoTenant):
    """
    November 13, 2015
    Author: Daniel Correa

    Runs the different case filters according to an array of queries.
    CONSULTAS HAS THE FOLLOWING FORMAT: [query for case 1, query for case 2, query for case 3, ..., query for case n]
    CASES ARE NUMBERED FROM 1 (TOPMOST) TO N (BOTTOMMOST)
    """
    if departamentos and genero:
        try:
            resultado = eval(consultas[0]%(departamentos,genero))
        except Exception as e:
            print(e)
    elif departamentos:
        try:
            resultado = eval(consultas[1]%(departamentos))
        except Exception as e:
            print(e)
    elif genero:
        try:
            resultado = eval(consultas[2]%(genero))
        except Exception as e:
            print(e)
    else:
        try:
            resultado = eval(consultas[3])
        except Exception as e:
            print(e)
    return resultado
Code example #26
File: model.py  Project: akestner/git-cola
    def config_dict(self, local=True):
        """parses the lines from git config --list into a dictionary"""

        kwargs = {
            'list': True,
            'global': not local, # global is a python keyword
        }
        config_lines = self.git.config(**kwargs).splitlines()
        newdict = {}
        for line in config_lines:
            try:
                k, v = line.split('=', 1)
            except:
                # value-less entry in .gitconfig
                continue
            v = core.decode(v)
            k = k.replace('.','_') # git -> model
            if v == 'true' or v == 'false':
                v = bool(eval(v.title()))
            try:
                v = int(eval(v))
            except:
                pass
            newdict[k]=v
        return newdict
Code example #27
File: SegmentCAD.py  Project: mehrtash/Slicer-OpenCAD
  def onReload(self, moduleName="SegmentCAD"):
    #Generic reload method for any scripted module.
    #ModuleWizard will subsitute correct default moduleName.
    
    import imp, sys, os, slicer
    
    widgetName = moduleName + "Widget"

    # reload the source code
    # - set source file path
    # - load the module to the global space
    filePath = eval('slicer.modules.%s.path' % moduleName.lower())
    p = os.path.dirname(filePath)
    if not sys.path.__contains__(p):
      sys.path.insert(0,p)
    fp = open(filePath, "r")
    globals()[moduleName] = imp.load_module(
        moduleName, fp, filePath, ('.py', 'r', imp.PY_SOURCE))
    fp.close()

    # rebuild the widget
    # - find and hide the existing widget
    # - create a new widget in the existing parent
    # parent = slicer.util.findChildren(name='%s Reload' % moduleName)[0].parent()
    parent = self.parent
    for child in parent.children():
      try:
        child.hide()
      except AttributeError:
        pass
    globals()[widgetName.lower()] = eval(
        'globals()["%s"].%s(parent)' % (moduleName, widgetName))
    globals()[widgetName.lower()].setup()
Code example #28
File: vtk2xml.py  Project: hs9906/paraview
def getReaderWriter(file_name, out_dir=None):
    r = vtk.vtkDataReader()
    r.SetFileName(file_name)
    f_base = os.path.splitext(file_name)[0]
    r.Update()
    reader = None
    writer = None
    xmlsuffix = '.xml'
    map = {'StructuredPoints': '.vti', 'StructuredGrid': '.vts',
           'RectilinearGrid': '.vtr', 'UnstructuredGrid': '.vtu',
           'PolyData': '.vtp'}
    for i in ['StructuredPoints', 'StructuredGrid', 'RectilinearGrid',
              'UnstructuredGrid', 'PolyData']:
        if eval('r.IsFile%s()'%i):
            reader = eval('vtk.vtk%sReader()'%i)
            if i == 'StructuredPoints':
                writer = eval('vtk.vtkXMLImageDataWriter()')
            else:
                writer = eval('vtk.vtkXML%sWriter()'%i)
            xmlsuffix = map[i]
            break
    if not reader:
        return None, None
    
    reader.SetFileName(file_name)
    reader.Update()

    out_file = f_base + xmlsuffix
    if out_dir:
        out_file = os.path.join(out_dir,
                                os.path.basename(f_base) + xmlsuffix)
    writer.SetFileName(out_file)
    return reader, writer
Code example #29
File: vcpparse.py  Project: LinuxCNC/linuxcnc
def paramiterator(node):
    """ returns a list of all parameters for a widget element """
    outparams = {}
    for k, v in node.attributes.items():
        if v and v[0] in "{[(\"'":
            v = eval(v)
        else:
            try:
                v = int(v)
            except ValueError:
                try:
                    v = float(v)
                except ValueError:
                    pass
        outparams[str(k)] = v

    for e in node.childNodes:
        if e.nodeType == e.ELEMENT_NODE \
                and (e.nodeName not in pyvcp_widgets.elements):
            try:
                v = eval(e.childNodes[0].nodeValue)
            except:
                exc_type, exc_value, exc_tb = sys.exc_info()
                raise SystemExit, ("Error evaluating xml file:\n"
                    "Widget %s, Property %s\n%s: %s") % (
                        node.nodeName, e.nodeName, exc_type.__name__, exc_value)
            outparams[str(e.nodeName)] = v
    return outparams
Code example #30
File: bjtuService.py  Project: mnmlyn/Python_Learning
 def isLoginState(self):
     headers = {
         'Host': 'service.bjtu.edu.cn',
         'Connection': 'keep-alive',
         'User-Agent': 'Mozilla/5.0(Windows NT 10.0; Win64; x64) AppleWebKit/537.36(KHTML, like Gecko) Chrome/58.0.3029.96 Safari/537.36',
         'Accept': 'application/json, text/javascript, */*; q=0.01',
         'X-Requested-With': 'XMLHttpRequest',
         'Referer': 'http://service.bjtu.edu.cn/LoginAction.action',
         'Accept-Encoding': 'gzip, deflate, sdch',
         'Accept-Language': 'zh-CN,zh;q=0.8',
         'Cookie': self.cookie
     }
     url = 'http://service.bjtu.edu.cn/refreshaccount?t=' + str(random.random())
     r = requests.get(url, headers=headers)
     rc = r.content
     if self.account in rc:  # proves we are currently logged in
         try:
             self.accountInfo = eval(rc)
         except:
             pass
         return True
     else:
         try:
             rc = rc.replace('null',"'null'")
             self.accountInfo = eval(rc)
             pass
         except:  # another account may be logged in; destroy the current cookie so we can log in again
             self.setCookieLocal('ERROR')
             pass
         return False
Code example #31
File: algorithms.py  Project: superplay1/c3
def adaptive_scan(x_init, fun=None, fun_grad=None, grad_lookup=None, options={}):
    """
    One dimensional scan of the function values around the initial point, using
    adaptive sampling

    Parameters
    ----------
    x_init : float
        Initial point
    fun : callable
        Goal function
    fun_grad : callable
        Function that computes the gradient of the goal function
    grad_lookup : callable
        Lookup a previously computed gradient
    options : dict
        Options include

        accuracy_goal: float
            Targeted accuracy for the sampling algorithm
        probe_list : list
            Points to definitely include in the sampling
        init_point : boolean
            Include the initial point in the sampling
    """
    if "accuracy_goal" in options:
        accuracy_goal = options["accuracy_goal"]
    else:
        accuracy_goal = 0.5
    print("accuracy_goal: " + str(accuracy_goal))

    probe_list = []
    if "probe_list" in options:
        for x in options["probe_list"]:
            probe_list.append(eval(x))

    if "init_point" in options:
        init_point = bool(options.pop("init_point"))
        if init_point:
            probe_list.append(x_init)

    # TODO make adaptive scan be able to do multidimensional scan
    bounds = options["bounds"][0]
    bound_min = bounds[0]
    bound_max = bounds[1]
    probe_list_min = min(probe_list)
    probe_list_max = max(probe_list)
    bound_min = min(bound_min, probe_list_min)
    bound_max = max(bound_max, probe_list_max)
    print(" ")
    print("bound_min: " + str((bound_min) / (2e9 * np.pi)))
    print("bound_max: " + str((bound_max) / (2e9 * np.pi)))
    print(" ")

    def fun1d(x):
        return fun([x])

    learner = adaptive.Learner1D(fun1d, bounds=(bound_min, bound_max))

    if probe_list:
        for x in probe_list:
            print("from probe_list: " + str(x))
            tmp = learner.function(x)
            print("done\n")
            learner.tell(x, tmp)

    adaptive.runner.simple(
        learner, goal=lambda learner_: learner_.loss() < accuracy_goal
    )
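A sketch of the options dict this routine expects (values are hypothetical); note that probe_list entries are strings that get eval'd, so numpy must be importable as np where they are evaluated:

import numpy as np

options = {
    "accuracy_goal": 0.1,
    "probe_list": ["2 * np.pi * 5.1e9", "2 * np.pi * 5.3e9"],
    "init_point": True,
    "bounds": [(2 * np.pi * 5.0e9, 2 * np.pi * 5.5e9)],
}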
Code example #32
    def __init__(self,unit_operation, container, graphicsView):
        l = ['Splitter','Mixer', 'DistillationColumn', 'Flash', 'CompoundSeparator', 'ShortcutColumn'] 
        stm = ['MaterialStream', 'EnergyStream']
        super(NodeItem, self).__init__()
        self.obj = unit_operation
        self.container = container
        self.graphicsView = graphicsView

        self.name = self.obj.name
        self.type = self.obj.type

        if (self.obj.modes_list):
            default_tooltip = f"{self.name}\n\n"
            default_tooltip_dict = self.obj.param_getter(self.obj.modes_list[0])
            for i, j in default_tooltip_dict.items():
                if j is not None:
                    default_tooltip = default_tooltip + f"   {i} : {j}\n"
            self.setToolTip(default_tooltip)

        self.nin = self.obj.no_of_inputs
        self.nop = self.obj.no_of_outputs
        if self.obj.type == 'Mixer':
            text, ok = QInputDialog.getText(self.container.graphicsView, 'Mixer', 'Enter number of input:')
            if ok and text:
                self.nin = int(text)
                self.obj.no_of_inputs = self.nin
                self.obj.variables['NI']['value'] = self.nin
        elif self.obj.type == 'Splitter':
            text, ok = QInputDialog.getText(self.container.graphicsView, 'Splitter', 'Enter number of output:')
            if ok and text:
                self.nop = int(text)
                self.obj.no_of_outputs = self.nop
                self.obj.variables['No']['value'] = self.nop
        elif self.obj.type == 'DistillationColumn':
            text, ok = QInputDialog.getText(self.container.graphicsView, 'DistillationColumn', 'Enter number of input:')
            if ok and text:
                self.nin = int(text)
                self.obj.no_of_inputs = self.nin
                self.obj.variables['Ni']['value'] = self.nin

        self.dock_widget = None
        lst.append(self)
        if self.obj.type in l:
            self.dock_widget = eval("DockWidget"+self.obj.type)(self.obj.name,self.obj.type,self.obj,self.container)
        elif self.obj.type in stm:
            self.dock_widget = eval("DockWidget"+self.obj.type)(self.obj.name,self.obj.type,self.obj,self.container)
        elif self.obj.type == "AdiabaticCompressor" or self.obj.type == "AdiabaticExpander":
            self.dock_widget = eval("DockWidgetCompressorExpander")(self.obj.name,self.obj.type,self.obj,self.container)
        else:
            self.dock_widget = DockWidget(self.obj.name,self.obj.type,self.obj,self.container)
        dock_widget_lst.append(self.dock_widget)
        self.main_window= findMainWindow(self)
        self.dock_widget.setFixedWidth(360)
        self.dock_widget.setFixedHeight(640)
        self.dock_widget.DockWidgetFeature(QDockWidget.AllDockWidgetFeatures)
        self.main_window.addDockWidget(Qt.LeftDockWidgetArea, self.dock_widget)
        self.dock_widget.hide()
        
        self.pic=QtGui.QPixmap("Icons/"+self.type+".png")
        self.rect = QtCore.QRect(0,0,self.pic.width(),self.pic.height())
        self.text = QGraphicsTextItem(self)
        f = QFont()
        f.setPointSize(8)
        self.text.setFont(f)
        self.text.setDefaultTextColor(QtGui.QColor(0,70,70,220))
        self.text.setParentItem(self)
        self.text.setPos(self.rect.width()-(self.rect.width()*0.9), self.rect.height())
        self.text.setPlainText(self.name) 
        
        self.setFlag(QtWidgets.QGraphicsPixmapItem.ItemIsMovable)
        self.setFlag(QtWidgets.QGraphicsPixmapItem.ItemIsSelectable)
    
        # Brush
        self.brush = QtGui.QBrush()
        self.brush.setStyle(QtCore.Qt.SolidPattern)
        self.brush.setColor(QtGui.QColor(80,0,90,255))
        # Pen
        self.pen = QtGui.QPen()
        self.pen.setStyle(QtCore.Qt.SolidLine)
        self.pen.setWidth(1)
        self.pen.setColor(QtGui.QColor(20,20,20,255))
    
        self.sel_pen = QtGui.QPen()
        self.sel_pen.setStyle(QtCore.Qt.SolidLine)
        self.sel_pen.setWidth(1)
        self.sel_pen.setColor(QtGui.QColor(220,220,220,255))
 
        # initializing the node sockets
        self.input , self.output = self.initialize_sockets(self.type)
Code example #33
File: itkExtras.py  Project: vpoughon/ITK
def ipython_kw_matches(text):
    """Match named ITK object's named parameters"""
    import IPython
    import itk
    import re
    import inspect
    import itkTemplate
    regexp = re.compile(r'''
                    '.*?' |  # single quoted strings or
                    ".*?" |  # double quoted strings or
                    \w+     |  # identifier
                    \S  # other characters
                    ''', re.VERBOSE | re.DOTALL)
    ip = IPython.get_ipython()
    if "." in text:  # a parameter cannot be dotted
        return []
    # 1. Find the nearest identifier that comes before an unclosed
    # parenthesis e.g. for "foo (1+bar(x), pa", the candidate is "foo".
    if ip.Completer.readline:
        textUntilCursor = ip.Completer.readline.get_line_buffer()[:ip.Completer.readline.get_endidx()]
    else:
        # IPython >= 5.0.0, which is based on the Python Prompt Toolkit
        textUntilCursor = ip.Completer.text_until_cursor

    tokens = regexp.findall(textUntilCursor)
    tokens.reverse()
    iterTokens = iter(tokens)
    openPar = 0
    for token in iterTokens:
        if token == ')':
            openPar -= 1
        elif token == '(':
            openPar += 1
            if openPar > 0:
                # found the last unclosed parenthesis
                break
    else:
        return []
    # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
    ids = []
    isId = re.compile(r'\w+$').match
    while True:
        try:
            ids.append(iterTokens.next())
            if not isId(ids[-1]):
                ids.pop()
                break
            if not iterTokens.next() == '.':
                break
        except StopIteration:
            break
    # lookup the candidate callable matches either using global_matches
    # or attr_matches for dotted names
    if len(ids) == 1:
        callableMatches = ip.Completer.global_matches(ids[0])
    else:
        callableMatches = ip.Completer.attr_matches('.'.join(ids[::-1]))
    argMatches = []
    for callableMatch in callableMatches:
        # drop the .New at this end, so we can search in the class members
        if callableMatch.endswith(".New"):
            callableMatch = callableMatch[:-4]
        try:
            object = eval(callableMatch, ip.Completer.namespace)
            if isinstance(object, itkTemplate.itkTemplate):
                # this is a template - lets grab the first entry to search for
                # the methods
                object = object.values()[0]
            namedArgs = []
            isin = isinstance(object, itk.LightObject)
            if inspect.isclass(object):
                issub = issubclass(object, itk.LightObject)
            if isin or (inspect.isclass(object) and issub):
                namedArgs = [n[3:] for n in dir(object) if n.startswith("Set")]
        except Exception as e:
            print(e)
            continue
        for namedArg in namedArgs:
            if namedArg.startswith(text):
                argMatches.append(u"%s=" % namedArg)
    return argMatches
Code example #34
l=[eval(x) for x in input("Enter Elements of list").split(' ')]
print(l)
element=eval(input("enter element"))
index=0
while index<len(l):
    if (l[index]==element):
        print(index)
    index+=1
Code example #35
def genJson(dbN, af, qText):

    qStmtL = qText.rstrip().lstrip().split('\r')

    (con, cursor) = mycgi.connectDB(db=dbN)

    tag = "pair_R%"

    cursor.execute(
        'select distinct samp_id from sample_tag where tag like "%s" and tag not like "%%,%%"'
        % tag)
    sIdL = [x for (x, ) in cursor.fetchall()]
    sIdL.sort()
    nullL = ["" for x in sIdL]

    geneIdxL = []
    geneDataL = []

    for i in range(len(qStmtL)):

        qStmt = qStmtL[i].rstrip().lstrip()

        if qStmt[0] == '(' and qStmt[-1] == ')':
            (qId, col, tbl, cnd) = eval(qStmt)
        elif qStmt in sampInfoH:
            (qId, col, tbl, cnd) = sampInfoH[qStmt]
        elif qStmt.count(':') == 2:
            (gN, mT, mV) = qStmt.split(':')
            (tbl, col, qIdF) = mutTypeH[mT]
            if (tbl == 'mutation_normal') or (tbl == 'mutation_rsq'):
                qId = gN + '-' + qIdF(mV) + ':' + mT[3:]
                cnd = 'gene_symL="%s" and %s like "%%%s%%"' % (gN, col, mV)
            else:
                qId = gN + '-' + qIdF(mV)
                cnd = 'gene_sym="%s" and %s like "%%%s%%"' % (gN, col, mV)
        elif qStmt.count(':') == 1:
            (gN, qId) = qStmt.split(':')
            (tbl, col) = otherTypeH[qId]
            if 'PATH' in qId:
                cnd = 'pathway="%s"' % gN
            elif 'TYPE' in qId:
                cnd = '%s' % gN
                col = gN
            else:
                cnd = 'gene_sym="%s"' % gN
            qId = gN + '-' + qId
        else:
            print '<b>Input Error: %s</b><br>' % qStmt
            sys.exit(1)

        if tbl in afColNameH:
            af_cond = 'and %s/(%s+%s) > %s' % (
                afColNameH[tbl][0], afColNameH[tbl][0], afColNameH[tbl][1], af)
            ord_cond = '%s desc' % afColNameH[tbl][0]
            af_frequency = ',' + afColNameH[tbl][0] + '/(' + afColNameH[tbl][
                0] + '+' + afColNameH[tbl][1] + ') as frequency'
            af_numerator = ',' + afColNameH[tbl][0]
            af_denominator = ',(' + afColNameH[tbl][0] + '+' + afColNameH[tbl][
                1] + ') as denominator'
        else:
            af_cond = ''
            ord_cond = col
            af_frequency = ''
            af_numerator = ''
            af_denominator = ''

        count = 0
        dataL = []
        frequency_data = []
        pair_data = []
        fraction_data = []

        for sId in sIdL:
            pair_fraction = ''
            count_flag = 0
            tag = "pair_P:"
            cursor.execute(
                'select samp_id from sample_tag where tag like "%s%s"' %
                (tag, sId))
            t = cursor.fetchone()
            pair_id = "%s" % (t[0], )

            cursor.execute(
                'select %s %s %s %s from %s where samp_id="%s" and %s %s order by %s limit 1'
                % (col, af_frequency, af_numerator, af_denominator, tbl,
                   pair_id, cnd, af_cond, ord_cond))
            p = cursor.fetchone()
            if p:
                count += 1
                count_flag = 1
                if tbl in afColNameH:
                    if p[1]:
                        pair_freq = pair_id + ":" + str(float(p[1]))
                        pair_data.append(pair_freq)

                        pair_fraction += str(int(p[2])) + '/' + str(int(p[3]))
                elif (tbl in 'rpkm_gene_expr') or (tbl in 'array_cn') or (
                        tbl in 'array_pathway'
                ) or (tbl in 'rpkm_pathway') or (tbl in 'array_gene_expr') or (
                        tbl in 'array_subtype') or (tbl in 'rpkm_subtype'):
                    pair_value = pair_id + ":" + str(float(p[0]))
                    pair_data.append(pair_value)
                else:
                    pair_d = pair_id + ":nofreq"
                    pair_data.append(pair_d)
                    pair_fraction = ':'
            else:
                if tbl in afColNameH:
                    if tbl in "mutation_normal":
                        tag = "Xseq_%"
                        cursor.execute(
                            'select samp_id from sample_tag where samp_id = "%s" and tag like "%s"'
                            % (pair_id, tag))
                        x = cursor.fetchone()
                        if x:
                            pair_flag = pair_id + ":" + str(0)
                        else:
                            pair_flag = pair_id + ":null"

                    else:
                        cursor.execute(
                            'select samp_id from splice_normal where samp_id = "%s" limit 1'
                            % pair_id)
                        m = cursor.fetchone()
                        if m:
                            pair_flag = pair_id + ":" + str(0)
                        else:
                            pair_flag = pair_id + ":null"
                else:
                    pair_flag = pair_id + ":null"
                pair_data.append(pair_flag)

            cursor.execute(
                'select %s %s %s %s from %s where samp_id="%s" and %s %s order by %s limit 1'
                % (col, af_frequency, af_numerator, af_denominator, tbl, sId,
                   cnd, af_cond, ord_cond))
            r = cursor.fetchone()

            if r:
                dataL.append("%s" % (r[0], ))
                if count_flag == 0:
                    count += 1

                if tbl in afColNameH:
                    if r[1]:
                        fraction = str(int(r[2])) + "/" + str(int(r[3]))
                        fraction_data.append(fraction + ":" + pair_fraction)
                        frequency_data.append(float(r[1]))
                else:
                    fraction_data.append("")
                    frequency_data.append('nofreq')
            else:
                if tbl in afColNameH:
                    if tbl in "mutation_normal":
                        tag = "Xseq_%"
                        cursor.execute(
                            'select samp_id from sample_tag where samp_id ="%s" and tag like "%s"'
                            % (sId, tag))
                        x = cursor.fetchone()
                        if x:
                            data_flag = qId
                        else:
                            data_flag = ""
                    else:
                        cursor.execute(
                            'select samp_id from splice_normal where samp_id = "%s" limit 1'
                            % sId)
                        m = cursor.fetchone()
                        if m:
                            data_flag = qId
                        else:
                            data_flag = ""
                else:
                    data_flag = ""

                dataL.append(data_flag)
                fraction_data.append(pair_fraction)
                frequency_data.append(0)

        geneIdxL.append((qId, i))

        if 'RPKM' in qId:
            for i in range(len(pair_data)):
                try:
                    dataL[i] = str(
                        log2(float(pair_data[i].split(':')[1]) + 1) -
                        log2(float(dataL[i]) + 1))
                except:
                    dataL[i] = ""
        elif 'PATH' in qId or 'TYPE' in qId or 'EXPR' in qId or 'CNA' in qId:
            for i in range(len(pair_data)):
                try:
                    dataL[i] = str(
                        float(pair_data[i].split(':')[1]) - float(dataL[i]))
                except:
                    dataL[i] = ""
        else:
            for i in range(len(pair_data)):
                try:
                    frequency_data[i] = str(
                        log2(float(pair_data[i].split(':')[1]) + 0.01) -
                        log2(float(frequency_data[i]) + 0.01))
                except:
                    frequency_data[i] = ""
                    dataL[i] = ""
        geneDataL.append({
            "rppa":
            nullL,
            "hugo":
            qId,
            "mutations":
            dataL,
            "mrna":
            nullL,
            "cna":
            nullL,
            "freq":
            frequency_data,
            "fraction":
            fraction_data,
            "percent_altered":
            "%s (%d%s)" % (count, 100. * count / len(sIdL), '%')
        })

    resultH = { \
     "dbN":dbN,
     "af":af,
     "hugo_to_gene_index":dict(geneIdxL), \
     "gene_data": geneDataL, \
     "samples": dict((sIdL[i],i) for i in range(len(sIdL)))
     }

    jsonStr = json.dumps(resultH, sort_keys=True).replace('""', 'null')

    #print jsonStr

    jsonFile = open('/var/www/html/js/gene_data.json', 'w')
    jsonFile.write(jsonStr)
    jsonFile.close()
Code example #36
File: util.py  Project: xinguoliu/pytrip
 def f(x):
     return eval(code, locals())
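On its own this fragment is not runnable; a minimal sketch of the enclosing pattern it implies (assumed context, not the original pytrip wrapper): code is an expression string that is evaluated with x bound as a local name.

def make_fun(code):
    def f(x):
        # evaluate the expression string with x available in the local namespace
        return eval(code, {}, {"x": x})
    return f

square = make_fun("x * x")
print(square(3))
# -> 9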
Code example #37
File: name_n.py  Project: kimxminsu/Python
def eval(tree):
    if not tree:          return 0  # tree == None
    opr = tree[0]
    if opr == '+':
        return eval(tree[1]) + eval(tree[2])
    elif opr == '-':
        return eval(tree[1]) - eval(tree[2])
    elif opr == '*':
        return eval(tree[1]) * eval(tree[2])
    elif opr == '/':
        return eval(tree[1]) / eval(tree[2])
    elif opr == 'UMINUS':
        return - eval(tree[1])
    elif opr == 'N':
        return tree[1]
    elif opr == 'NAME':
        return names[tree[1]]
    elif opr == ';':
        eval(tree[1]); return eval(tree[2])
    elif opr == '=':
        names[tree[1]] = eval(tree[2])
    else:
        print("unexpected case : ", tree)
Code example #38
File: name_n.py  Project: kimxminsu/Python
# dictionary of names
names = {}


def eval(tree):
    if not tree:          return 0  # tree == None
    opr = tree[0]
    if opr == '+':
        return eval(tree[1]) + eval(tree[2])
    elif opr == '-':
        return eval(tree[1]) - eval(tree[2])
    elif opr == '*':
        return eval(tree[1]) * eval(tree[2])
    elif opr == '/':
        return eval(tree[1]) / eval(tree[2])
    elif opr == 'UMINUS':
        return - eval(tree[1])
    elif opr == 'N':
        return tree[1]
    elif opr == 'NAME':
        return names[tree[1]]
    elif opr == ';':
        eval(tree[1]); return eval(tree[2])
    elif opr == '=':
        names[tree[1]] = eval(tree[2])
    else:
        print("unexpected case : ", tree)


print(eval(ast))
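The evaluator walks tuple-shaped parse trees; a minimal hand-built tree (hypothetical input, normally produced by the accompanying parser) shows the expected node layout:

tree = (';',
        ('=', 'x', ('+', ('N', 2), ('N', 3))),        # x = 2 + 3
        ('*', ('NAME', 'x'), ('UMINUS', ('N', 4))))   # x * (-4)
print(eval(tree))
# -> -20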
Code example #39
# get parameter from shell
parser = argparse.ArgumentParser(description='Deploy iscsi to host')
parser.add_argument('-i',
                    type=str,
                    help="""specify inventory host file
                        default=/etc/ansible/hosts""")
parser.add_argument('--private-key',
                    type=str,
                    help='use this file to authenticate the connection')
parser.add_argument('-e',
                    type=str,
                    help='set additional variables as key=value or YAML/JSON')

args = parser.parse_args()
argument_dict = eval(args.e)

# update the variable from shell arguments
locals().update(argument_dict)
virtenv_path = "%s/virtualenv/iscsi/" % zstack_root
iscsi_root = "%s/iscsi" % zstack_root
# create log
logger_dir = "/var/log/zstack/"
create_log(logger_dir)
host_post_info = HostPostInfo()
host_post_info.host_inventory = args.i
host_post_info.host = host
host_post_info.post_url = post_url
host_post_info.private_key = args.private_key

# include zstacklib.py
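For reference, the -e value is expected to be a Python dict literal; eval() turns it into a dict and locals().update() promotes its keys (host, post_url, zstack_root, ...) to variables. A hypothetical invocation (script name and values invented):

# python deploy_iscsi.py -i /etc/ansible/hosts --private-key /root/.ssh/id_rsa \
#     -e "{'host': '10.0.1.7', 'post_url': 'http://mn:8080/progress', 'zstack_root': '/var/lib/zstack'}"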
Code example #40
    def __init__(self, aesthetics, data):
        # ggplot should just 'figure out' which is which
        if not isinstance(data, pd.DataFrame):
            aesthetics, data = data, aesthetics

        self.aesthetics = aesthetics
        self.data = data

        # TODO: This should probably be modularized
        # Look for alias/lambda functions
        for ae, name in self.aesthetics.items():
            if name not in self.data and not is_identity(name):
                result = re.findall(r'(?:[A-Z])|(?:[A-Za-z_0-9]+)|(?:[/*+_=\(\)-])', name)
                if re.match("factor[(][A-Za-z_0-9]+[)]", name):
                    m = re.search("factor[(]([A-Za-z_0-9]+)[)]", name)
                    self.data[name] = self.data[m.group(1)].apply(str)
                else:
                    lambda_column = ""
                    for item in result:
                        if re.match("[/*+_=\(\)-]", item):
                            pass
                        elif re.match("^[0-9.]+$", item):
                            pass
                        else:
                            item = "self.data.get('%s')" % item
                        lambda_column += item
                    self.data[name] = eval(lambda_column)
        # defaults
        self.geoms= []
        self.n_wide = 1
        self.n_high = 1
        self.n_dim_x = None
        self.n_dim_y = None
        # facets
        self.facets = []
        self.facet_type = None
        self.facet_scales = None
        # components
        self.title = None
        self.xlab = None
        self.ylab = None
        # format for x/y major ticks
        self.xtick_formatter = None
        self.xbreaks = None
        self.xtick_labels = None
        self.xmajor_locator = None
        self.xminor_locator = None
        self.ytick_formatter = None
        self.xlimits = None
        self.ylimits = None
        self.scale_y_reverse = None
        self.scale_x_reverse = None
        # legend is a dictionary of { legend_type: { visual_value: legend_key } },
        # where legend_type is one of "color", "linestyle", "marker", "size";
        # visual_value is color value, line style, marker character, or size value;
        # and legend_key is usually a quantile (???).
        self.legend = {}

        # continuous color configs
        self.color_scale = None
        self.colormap = plt.cm.Blues
Code example #41
def _getEi(beam):
    props = eval(open(os.path.join(beam, 'out/props.json')).read())
    ei = float(props['average energy'].split(' ')[0])
    return ei
Code example #42
File: make_hist.py  Project: mikaelbk/stk1000
from matplotlib.pyplot import *
from numpy import *
import scipy.integrate as integrate
infile = open("data.txt", "r")
infile.readline()

x = []
hist_list = []
for line in infile.readlines():
    words = line.split(" ")
    hist_list.append(float(words[1]))
    x.append(int(eval(words[0])))

s = std(hist_list)
m = mean(hist_list)


def gaussian(x, s=s, m=m):
    return (1 / (sqrt(2 * pi) * s)) * exp(-0.5 * ((x - m) / s)**2)


def integral(fra=0, til=100, func=gaussian):
    return integrate.quad(func, fra, til)[0]


if __name__ == '__main__':
    hist(hist_list, bins=linspace(0.5, 1.2, 8), histtype="bar", color='green')
    plot(linspace(min(hist_list), max(hist_list), 1000), [
        gaussian(i, s, m) * 17
        for i in linspace(min(hist_list), max(hist_list), 1000)
    ],
Code example #43
def mainSearch(url):
    if '|SPLIT|' in url: url, site = url.split('|SPLIT|')
    term = url
    if term == "null":  term = kodi.get_keyboard('Search %s' % kodi.get_name())

    if term:
        search_on_off = kodi.get_setting("search_setting")
        if search_on_off == "true":
            delTerm(term)
            addTerm(term)

        display_term = term
        term = quote_plus(term)
        term = term.lower()

        if site == 'all':
            sources = __all__
            search_sources = []
            for i in sources:
                try:
                    if eval(i + ".search_tag") == 1: search_sources.append(i)
                except:
                    pass

            if search_sources:
                i = 0
                source_num = 0
                failed_list = ''
                line1 = kodi.giveColor('Searching: ', 'white') + kodi.giveColor('%s', 'dodgerblue')
                line2 = kodi.giveColor('Found: %s videos', 'white')
                line3 = kodi.giveColor('Source: %s of ' + str(len(search_sources)), 'white')

                kodi.dp.create(kodi.get_name(), '', line2, '')
                xbmc.executebuiltin('Dialog.Close(busydialog)')
                for u in sorted(search_sources):
                    if kodi.dp.iscanceled(): break
                    try:
                        i += 1
                        progress = 100 * int(i) / len(search_sources)
                        kodi.dp.update(progress, line1 % u.title(), line2 % str(source_num), line3 % str(i))
                        search_url = eval(u + ".search_base") % term
                        try:
                            source_n = eval(u + ".content('%s',True)" % search_url)
                        except:
                            source_n = 0
                        try:
                            source_n = int(source_n)
                        except:
                            source_n = 0
                        if not source_n:
                            if failed_list == '':
                                failed_list += str(u).title()
                            else:
                                failed_list += ', %s' % str(u).title()
                        else:
                            source_num += int(source_n)
                    except:
                        pass
                kodi.dp.close()
                if failed_list != '':
                    kodi.notify(msg='%s failed to return results.' % failed_list, duration=4000, sound=True)
                    log_utils.log('Scrapers failing to return search results are :: : %s' % failed_list,
                                  xbmc.LOGERROR)
                else:
                    kodi.notify(msg='%s results found.' % str(source_num), duration=4000, sound=True)
                xbmcplugin.setContent(kodi.syshandle, 'movies')
                xbmcplugin.endOfDirectory(kodi.syshandle, cacheToDisc=True)
                local_utils.setView('search')
        else:
            search_url = eval(site + ".search_base") % term
            eval(site + ".content('%s')" % search_url)
    else:
        kodi.notify(msg='Blank searches are not allowed.')
        quit()
コード例 #44
0
ファイル: 268324.py プロジェクト: 382335657/pythonHomework
def insert(k: int):
    print("insert:{}".format(k))
    # insert k before the first element that is >= k; append it if k is the largest
    for i in range(len(sortd)):
        if k <= sortd[i]:
            sortd.insert(i, k)
            return
    sortd.append(k)
lists = list(eval(input()))
#print(lists)
sortd = list()
while lists:
    insert(lists.pop(0))
print(sortd)
コード例 #45
0
ファイル: TwoPSet.py プロジェクト: kishore-narendran/crdt-py
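 # 2P-set CRDT lookup: the add-set and delete-set values are stored as string
 # representations, parsed with eval(), and their set difference is returned via repr().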
 def get(self, client_id):
     add_set_value = self.add_set.get(client_id)
     delete_set_value = self.delete_set.get(client_id)
     final_set = set(eval(add_set_value)).difference(set(eval(delete_set_value)))
     return repr(final_set)
コード例 #46
0
################################################################################
if __name__ == '__main__':
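    # Benchmark tester.test_1 .. test_7 against the current numpy.ma.core using timeit;
    # the commented-out setups compare alternative builds of the module.
    # (assumes numpy as np, timeit and the moduletester class are defined earlier in this file)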

    setup_base = "from __main__ import moduletester \n"\
                 "import numpy\n" \
                 "tester = moduletester(module)\n"
#    setup_new = "import np.ma.core_ini as module\n"+setup_base
    setup_cur = "import np.ma.core as module\n"+setup_base
#    setup_alt = "import np.ma.core_alt as module\n"+setup_base
#    setup_tmp = "import np.ma.core_tmp as module\n"+setup_base

    (nrepeat, nloop) = (10, 10)

    if 1:
        for i in range(1,8):
            func = 'tester.test_%i()' % i
#            new = timeit.Timer(func, setup_new).repeat(nrepeat, nloop*10)
            cur = timeit.Timer(func, setup_cur).repeat(nrepeat, nloop*10)
#            alt = timeit.Timer(func, setup_alt).repeat(nrepeat, nloop*10)
#            tmp = timeit.Timer(func, setup_tmp).repeat(nrepeat, nloop*10)
#            new = np.sort(new)
            cur = np.sort(cur)
#            alt = np.sort(alt)
#            tmp = np.sort(tmp)
            print("#%i" % i +50*'.')
            print(eval("moduletester.test_%i.__doc__" % i))
#            print "core_ini     : %.3f - %.3f" % (new[0], new[1])
            print("core_current : %.3f - %.3f" % (cur[0], cur[1]))
#            print "core_alt     : %.3f - %.3f" % (alt[0], alt[1])
#            print "core_tmp     : %.3f - %.3f" % (tmp[0], tmp[1])
コード例 #47
0
def train_test():
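    # K-fold train/eval loop: for each selected model, train with early stopping on the dev set,
    # reload the best checkpoint, and write per-fold and averaged accuracy/precision/recall/F1 reports.
    # (assumes module-level configuration such as model_select, model_lst, trainfolds, testfolds,
    #  the samplers, fieldnames, batch_size, lr, maxepoch, maxpatience and use_cuda is defined elsewhere)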
    for k in model_select:
        
        table = BeautifulTable()
        avgtable = BeautifulTable()
        fieldnames1 = [model_lst[k],'Avg','Std_dev']  # column names for the global report CSV
        folder = os.path.join(cwd,'Report_'+str(model_lst[k]))
        if not os.path.exists(folder):
            os.mkdir(folder)

        logfilepath = os.path.join(folder,'log.txt')
        logfile = open(logfilepath,"w") 

        with open(os.path.join(folder,'Report_folds.csv'),'w') as f_fold, open(os.path.join(folder,'Report_global.csv'),'w') as f_global:
            writer = csv.DictWriter(f_fold, fieldnames = fieldnames)
            writer1  = csv.DictWriter(f_global, fieldnames = fieldnames1)
            writer.writeheader()
            writer1.writeheader()
            t0 = 0
            t1 = 0
            for i in range(1,nfold+1):
                
                t0 = time.time()
                setSeeds(0)
                
                class Traindataset(Dataset):
                    def __init__(self):
                        self.data=trainfolds[i-1]
                        self.x_data=torch.from_numpy(np.asarray(self.data.iloc[:, 0:-1])) 
                        self.len=self.data.shape[0]
                        self.y_data = torch.from_numpy(np.asarray(self.data.iloc[:, [-1]]))
                        if (use_cuda):
                            self.x_data = self.x_data.cuda()
                            self.y_data = self.y_data.cuda()
                    def __getitem__(self, index):
                        return self.x_data[index], self.y_data[index]
                    def __len__(self):
                        return self.len
                class Testdataset(Dataset):
                    def __init__(self):
                        self.data=testfolds[i-1]
                        self.x_data=torch.from_numpy(np.asarray(self.data.iloc[:, 0:-1]))
                        self.len=self.data.shape[0]
                        self.y_data = torch.from_numpy(np.asarray(self.data.iloc[:, [-1]]))
                        if (use_cuda):
                            self.x_data = self.x_data.cuda()
                            self.y_data = self.y_data.cuda()
                    def __getitem__(self, index):
                        return self.x_data[index], self.y_data[index]
                    def __len__(self):
                        return self.len

                traindataset = Traindataset()
                testdataset = Testdataset()
                testdataset_U = Testdataset()    # added: separate copy used for the UNLEARNED test set

                header(model_lst,k,i,traindataset,testdataset)

                #train_sampler,dev_sampler,test_sampler=dev_shuffle(shuffle_train,shuffle_test,val_split,traindataset,testdataset)
                #train_sampler,dev_sampler,test_val_sampler,test_sampler=data_split(shuffle_train,shuffle_test,val_split,test_val_split,traindataset,testdataset)
                
                #loaders
                train_loader = torch.utils.data.DataLoader(traindataset, batch_size=batch_size, 
                                                           sampler=train_sampler,drop_last=True)
                test_val_loader = torch.utils.data.DataLoader(testdataset, batch_size=batch_size,
                                                                sampler=test_val_sampler,drop_last=True)
                dev_loader = torch.utils.data.DataLoader(traindataset, batch_size=batch_size, 
                                                           sampler=dev_sampler,drop_last=True)
                
                # changed this from 'testdataset' to 'testdataset_U'
                test_loader = torch.utils.data.DataLoader(testdataset_U, batch_size=batch_size,
                                                                sampler=test_sampler,drop_last=True)
                modelClass = "Model" + str(k)
                model = eval(modelClass)()
                
                if (use_cuda):
                    model = model.cuda()

                if doTrain:
                    
                    criterion = nn.BCELoss(size_average=True)
                    optimizer = torch.optim.SGD(model.parameters(), lr)    
                    msg = 'Accuracy on test set before training: '+str(accuracy(test_loader, model))+'\n'
                    print(msg)
                    logfile.write(msg + "\n")
                    #EARLY STOP
                    epoch = 0
                    patience = 0
                    best_acc_dev=0
                    while (epoch<maxepoch and patience < maxpatience):
                        running_loss = 0.0
                        for l, data in enumerate(train_loader, 0):
                            inputs, labels = data
                            if use_cuda:
                                inputs, labels = inputs.cuda(), labels.cuda()
                            inputs, labels = Variable(inputs), Variable(labels)
                            y_pred = model(inputs)
                            if use_cuda:
                                y_pred = y_pred.cuda()
                            loss = criterion(y_pred, labels)
                            optimizer.zero_grad()
                            loss.backward()
                            optimizer.step()
                            running_loss += loss.item()
                            # print the average loss every 2000 mini-batches
                            if l % 2000 == 1999:
                                msg = '[%d, %5d] loss: %.3f' %(epoch + 1, l + 1, running_loss / 2000)
                                print(msg)
                                logfile.write(msg + "\n")
                                running_loss = 0.0
                                #msg = 'Accuracy on dev set:' + str(accuracy(dev_loader))
                                #print(msg)
                                #logfile.write(msg + "\n")        
                        accdev = (accuracy(dev_loader, model))
                        msg = 'Accuracy on dev set:' + str(accdev)
                        print(msg)
                        logfile.write(msg + "\n")        
                        is_best = bool(accdev > best_acc_dev)
                        best_acc_dev = (max(accdev, best_acc_dev))
                        save_checkpoint({
                            'epoch': epoch + 1,
                            'state_dict': model.state_dict(),
                            'best_acc_dev': best_acc_dev
                        }, is_best,os.path.join(folder,'F'+str(i)+'best.pth.tar'), logfile)
                        if is_best:
                            patience=0
                        else:
                            patience = patience+1
                        epoch = epoch+1
                        logfile.flush()
                        
                if doEval:
                    if use_cuda:                        
                        state = torch.load(os.path.join(folder,'F'+str(i)+'best.pth.tar'))
                    else:
                        state = torch.load(os.path.join(folder,'F'+str(i)+'best.pth.tar'), map_location=lambda storage, loc: storage)
                    stop_epoch = state['epoch']
                    model.load_state_dict(state['state_dict'])
                    if not use_cuda:
                        model.cpu()
                    accuracy_dev = state['best_acc_dev']
                    model.eval()
                    acctest = (accuracy(test_loader, model))
                    acctest_val = (accuracy(test_val_loader, model))
                    accs[i-1] = acctest
                    accs_test_val[i-1] = acctest_val
                    
                    precision_0_U,recall_0_U,f1_score_0_U = pre_rec(test_loader, model, 0.0)
                    precisions_0_U[i-1] = precision_0_U
                    recalls_0_U[i-1] = recall_0_U
                    f1_scores_0_U[i-1] = f1_score_0_U
                    
                    precision_1_U,recall_1_U,f1_score_1_U = pre_rec(test_loader, model, 1.0)
                    precisions_1_U[i-1] = precision_1_U
                    recalls_1_U[i-1] = recall_1_U
                    f1_scores_1_U[i-1] = f1_score_1_U
                    
                    precision_0_L,recall_0_L,f1_score_0_L = pre_rec(test_val_loader, model, 0.0)
                    precisions_0_L[i-1] = precision_0_L
                    recalls_0_L[i-1] = recall_0_L
                    f1_scores_0_L[i-1] = f1_score_0_L
                    
                    precision_1_L,recall_1_L,f1_score_1_L = pre_rec(test_val_loader, model, 1.0)
                    precisions_1_L[i-1] = precision_1_L
                    recalls_1_L[i-1] = recall_1_L
                    f1_scores_1_L[i-1] = f1_score_1_L
                    
                    accs_dev[i-1] = accuracy_dev
                    
                    writer.writerow({'Fold': i,'Acc_L': acctest_val, 'Acc_U': acctest,
                                     #'P_0_U': precision_0_U,'R_0_U': recall_0_U,'F1_0_U': f1_score_0_U,
                                     'R_0_U': recall_0_U,
                                     #'P_1_U': precision_1_U,'R_1_U': recall_1_U,'F1_1_U': f1_score_1_U,
                                     'R_1_U': recall_1_U,
                                     #'P_0_L': precision_0_L,'R_0_L': recall_0_L,'F1_0_L': f1_score_0_L,
                                     'R_0_L': recall_0_L,
                                     #'P_1_L': precision_1_L,'R_1_L': recall_1_L,'F1_1_L': f1_score_1_L,
                                     'R_1_L': recall_1_L,
                                     'Stop_epoch': stop_epoch,'Accuracy_dev': accuracy_dev})
                    table.column_headers = fieldnames
                    table.append_row([i,acctest_val,acctest,
                                      #precision_0_U,recall_0_U,f1_score_0_U,
                                      recall_0_U,
                                      #precision_1_U,recall_1_U,f1_score_1_U,
                                      recall_1_U,
                                      #precision_0_L,recall_0_L,f1_score_0_L,
                                      recall_0_L,
                                      #precision_1_L,recall_1_L,f1_score_1_L,
                                      recall_1_L,
                                      stop_epoch,accuracy_dev])
                    print(table)
                    print('----------------------------------------------------------------------')
                    logfile.write(str(table) + "\n----------------------------------------------------------------------\n")
                    t1 = time.time()
                    times[i-1] = int(t1-t0)
            
            duration = str(datetime.timedelta(seconds=np.sum(times)))
            writer.writerow({})
            writer.writerow({'Fold': 'Elapsed time: '+duration})
            avg_acc_test_val = round(np.average(accs_test_val),3)
            std_acc_test_val = round(np.std(accs_test_val),3)
            
            avg_acc_test_val,avg_a,avg_p_0_U,avg_r_0_U,avg_f_0_U,avg_p_1_U,avg_r_1_U,avg_f_1_U,avg_p_0_L,avg_r_0_L,avg_f_0_L,avg_p_1_L,avg_r_1_L,avg_f_1_L,avg_a_d=averages([accs_test_val,accs,precisions_0_U,recalls_0_U,f1_scores_0_U,precisions_1_U,recalls_1_U,f1_scores_1_U,precisions_0_L,recalls_0_L,f1_scores_0_L,precisions_1_L,recalls_1_L,f1_scores_1_L,accs_dev])
            std_acc_test_val,std_a,std_p_0_U,std_r_0_U,std_f_0_U,std_p_1_U,std_r_1_U,std_f_1_U,std_p_0_L,std_r_0_L,std_f_0_L,std_p_1_L,std_r_1_L,std_f_1_L,std_a_d=stds([accs_test_val,accs,precisions_0_U,recalls_0_U,f1_scores_0_U,precisions_1_U,recalls_1_U,f1_scores_1_U,precisions_0_L,recalls_0_L,f1_scores_0_L,precisions_1_L,recalls_1_L,f1_scores_1_L,accs_dev])
            
            writer1.writerow({model_lst[k]: 'Acc_U','Avg': avg_a,'Std_dev': std_a})
            writer1.writerow({model_lst[k]: 'Acc_L','Avg': avg_acc_test_val,'Std_dev': std_acc_test_val})
            writer1.writerow({model_lst[k]: 'P_0_U','Avg': avg_p_0_U ,'Std_dev': std_p_0_U})
            writer1.writerow({model_lst[k]: 'R_0_U','Avg': avg_r_0_U,'Std_dev': std_r_0_U})
            writer1.writerow({model_lst[k]: 'F1_0_U','Avg': avg_f_0_U,'Std_dev': std_f_0_U})
            writer1.writerow({model_lst[k]: 'P_1_U','Avg': avg_p_1_U,'Std_dev': std_p_1_U})
            writer1.writerow({model_lst[k]: 'R_1_U','Avg': avg_r_1_U,'Std_dev': std_r_1_U})
            writer1.writerow({model_lst[k]: 'F1_1_U','Avg': avg_f_1_U,'Std_dev': std_f_1_U})            
            writer1.writerow({model_lst[k]: 'P_0_L','Avg': avg_p_0_L,'Std_dev': std_p_0_L})
            writer1.writerow({model_lst[k]: 'R_0_L','Avg': avg_r_0_L,'Std_dev': std_r_0_L})
            writer1.writerow({model_lst[k]: 'F1_0_L','Avg': avg_f_0_L,'Std_dev': std_f_0_L})
            writer1.writerow({model_lst[k]: 'P_1_L','Avg': avg_p_1_L,'Std_dev': std_p_1_L})
            writer1.writerow({model_lst[k]: 'R_1_L','Avg': avg_r_1_L,'Std_dev': std_r_1_L})
            writer1.writerow({model_lst[k]: 'F1_1_L','Avg': avg_f_1_L,'Std_dev': std_f_1_L})                        
            writer1.writerow({model_lst[k]: 'Acc_dev','Avg': avg_a_d,'Std_dev': std_a_d})
            writer1.writerow({})
            writer1.writerow({model_lst[k]: 'Elapsed time: '+duration})
            avgtable.column_headers = fieldnames1
            avgtable.append_row(['Acc_U',avg_a,std_a])
            avgtable.append_row(['Acc_L',avg_acc_test_val,std_acc_test_val])
            avgtable.append_row(['P_0_U',avg_p_0_U,std_p_0_U])
            avgtable.append_row(['R_0_U',avg_r_0_U,std_r_0_U])
            avgtable.append_row(['F1_0_U',avg_f_0_U,std_f_0_U])
            avgtable.append_row(['P_1_U',avg_p_1_U,std_p_1_U])
            avgtable.append_row(['R_1_U',avg_r_1_U,std_r_1_U])
            avgtable.append_row(['F1_1_U',avg_f_1_U,std_f_1_U])                        
            avgtable.append_row(['P_0_L',avg_p_0_L,std_p_0_L])
            avgtable.append_row(['R_0_L',avg_r_0_L,std_r_0_L])
            avgtable.append_row(['F1_0_L',avg_f_0_L,std_f_0_L])
            avgtable.append_row(['P_1_L',avg_p_1_L,std_p_1_L])
            avgtable.append_row(['R_1_L',avg_r_1_L,std_r_1_L])
            avgtable.append_row(['F1_1_L',avg_f_1_L,std_f_1_L])            
            avgtable.append_row(['Accuracy_dev',avg_a_d,std_a_d])
            print(avgtable)
            logfile.write(str(avgtable) + "\n")
            msg = 'Elapsed time: '+ duration + '\n\n'
            print(msg)
            logfile.write(msg )

        logfile.close()
コード例 #48
0
ファイル: xml_pickle-0.49.py プロジェクト: andymtv/gnosis-web
def safe_eval(s):
    if 0:   # placeholder condition for detecting a malicious string
        raise ValueError(
            "SecurityError: malicious string '%s' should not be eval()'d" % s)
    else:
        return eval(s)
コード例 #49
0
ファイル: caf1.py プロジェクト: shash43/Cafe-Billing-System
def btnEqualsInput():
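    # evaluate the accumulated expression string and show the result in the calculator display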
    global operator
    sumup = str(eval(operator))
    text_Input.set(sumup)
    operator = ""
コード例 #50
0
		else:
			sample_attempt.append((r, False))


# Main
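# Parse the domain file: each line lists a predicate and '&'-separated instances whose
# arguments are parsed with eval(); query predicates get a zero-initialised counter.
# (domain, query_count and gringoFun are assumed to be defined earlier in the script)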
program_filename = sys.argv[1]
queries = sys.argv[2].split(',')
domain_filename = sys.argv[3]
domain_file = open(domain_filename, 'r')
for line in domain_file:
	if len(line) <= 2:
		continue
	parts = line.split(' ')
	instances = parts[1].split('&')
	for inst in instances:
		domain.append(gringoFun(parts[0], [eval(arg) for arg in inst.split(';')]))
	if parts[0] in queries:
		for inst in instances:
			query_count[gringoFun(parts[0], [eval(arg) for arg in inst.split(';')])] = 0

print 'domain', domain
print 'query atoms', query_count.keys()

iter_count = 0
random.seed()

sample_count = 0

# Generate First Sampling
whole_model = None
#prg.conf.solve.models = 1
コード例 #51
0
n = eval(input())

for i in range(1, n + 1):
    #print('*',end='')
    for j in range(1, n + 1):
        print('*', end=' ')
    print()
コード例 #52
0
def matchmark(colitem, markexpr):
    """Tries to match on any marker names, attached to the given colitem."""
    return eval(markexpr, {}, MarkMapping.from_keywords(colitem.keywords))
コード例 #53
        bus_label[128] = 1

    if rap_label[0] == 0:
        bus_label[108:111] = [0] * 3
        bus_label[110] = 1
    if rap_label[0] == 1:
        bus_label[108:111] = [0] * 3
        bus_label[109] = 1
    return bus_label


with open(
        '/home/bobby/work/code/bussiness100/prediction_raptestlist.txt') as f:
    bussiness = f.readlines()
    buss_names = [i.split(' ')[0] for i in bussiness]
    buss_labels = [eval(i.split(' ')[1:][0]) for i in bussiness]
    copy_buss_labels = copy.deepcopy(buss_labels)
with open('/home/bobby/work/RAP/minivision_rap/testlist.txt') as f:
    rap = f.readlines()
    rap_labels = [[int(j) for j in i.split(' ')[1:]] for i in rap]
labels_transed = []
for i in range(len(bussiness)):
    temp = classtransfer(buss_labels[i], rap_labels[i])
    labels_transed.append(temp)

Flag = 0
if not labels_transed == copy_buss_labels:
    print('modified!')
    Flag = 1

if Flag:
コード例 #54
0
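# Fragment: look up each RefID against a Solr endpoint and record the matching PID;
# the JSON response text is parsed with eval() rather than json.loads().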
        if key == REFIDKEY and row[key]:
            result.update({DORAPIDKEY: None})
            payload = {
                'q': qfield + ":" + row[key],
                'fq': fq,
                'sort': sort,
                'fl': fl,
                'rows': rows,
                'wt': wt
            }  # add 'indent':indent if wt=xml
            r = requests.get(base_url, params=payload)
            if r.status_code != 200:
                print "Oops, something went wrong (got HTTP" + str(
                    r.status_code) + ")!"
            else:
                rr = eval(r.text)
                response = rr["response"]
                if int(response["numFound"]) > 1:
                    print "Warning: Found multiple records for RefID " + row[
                        key] + "!"
                    continue
                elif int(response["numFound"]) < 1 and verbose:
                    print "Notice: Did not find any record for RefID " + row[
                        key] + "."
                    continue
                elif int(response["numFound"]) == 1:
                    result[DORAPIDKEY] = response["docs"][0][pidkey]
    outvals.append(result)

### write result
コード例 #55
0
        # print(resp_dict)

        actual = login_resp.text
        if case.expected == actual:  # check whether the expected result matches the actual result
            do_excel.write_result(case.case_id + 1, actual, 'PASS')
        else:
            do_excel.write_result(case.case_id + 1, actual, 'FAIL')

    # test the recharge interface
    do_excel = DoExcel(contants.case_file, sheet_name='recharge')
    cases = do_excel.get_cases()
    http_request = HttpRequest2()
    # log in first to obtain a session
    login_url = 'http://test.lemonban.com/futureloan/mvc/api/member/login'
    login_param = '{"mobilephone":"18624342322", "pwd":"123456"}'
    login_resp2 = http_request.request('get', login_url, eval(login_param))

    print('test_recharge')
    for case in cases:

        print(case.__dict__)
        # print(type(case.data))
        resp = http_request.request(case.method, case.url, case.data)
        # print(resp.status_code)
        # print(resp.text)  # 响应文本
        resp_dict = resp.json()  # returns the response body as a dict
        print(resp_dict)
        # print(resp.text)
        # actual = resp.text("code")
        actual = resp_dict["code"]
コード例 #56
0
ファイル: utils.py プロジェクト: LiuZhenXian/hmm-ner
def load_data(path):
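    # each line of the file is expected to be a Python literal and is parsed with eval()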
    f = open(path, encoding='utf-8')
    return [eval(line.strip()) for line in f]
コード例 #57
0
 def __getitem__(self, item):
     return eval("self.{}".format(item))
コード例 #58
0
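 # Fragment of a palette-conversion loop: load each input file as a SwatchBook, resolve the
 # output codec with eval('codecs.' + options.output), and prompt before overwriting existing files.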
 skip = False
 try:
     print "Converting " + file
     if options.websvc:
         sb = SwatchBook(websvc=options.websvc, webid=file)
     else:
         sb = SwatchBook(file, options.input)
 except FileFormatError:
     sys.stderr.write(file + ": unknown file format\n")
 except (IndexError, ValueError):
     sys.stderr.write(file + ": invalid palette id\n")
 else:
     filename = os.path.splitext(os.path.basename(file))[0]
     dir = options.dir or ""
     fileout = os.path.join(
         dir, filename) + "." + eval('codecs.' + options.output).ext[0]
     while os.path.exists(fileout):
         wtd = raw_input(fileout +
                         " exists. [O]verwrite, [S]kip or [R]ename? ")
         if wtd.lower() == "o":
             break
         elif wtd.lower() == "r":
             fileout = raw_input("New file name: ")
             if dir not in fileout:
                 fileout = os.path.join(dir, fileout)
         elif wtd.lower() == "s":
             skip = True
             break
     if not skip:
         try:
             sb.write(options.output, fileout)
コード例 #59
0
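# Optionally compute per-channel normalisation statistics over the training set, rebuild the
# dataloaders with that normalisation transform, resolve the architecture by name via
# eval(config.arch), then fine-tune the learner while logging the run to Weights & Biases.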
if config.norm:
    global_stats = StatsRecorder()
    with torch.no_grad():
        for idx, (x, y) in enumerate(iter(data.train)):
            # update normalization statistics
            global_stats.update(x)

    global_mean, global_std = global_stats.mean, global_stats.std
    print(global_mean, global_std)
    norm = SpecNormalize(global_mean, global_std, axes=(0, 2, 3))
    batch_tfms.append(norm)

    data = get_data(batch_tfms=batch_tfms,
                    sample_rate=config.sample_rate,
                    batch_size=config.batch_size,
                    fold=config.fold,
                    seed=config.trial_num)

arch = eval(config.arch)

learn = get_learner(data, arch, normalize=(not config.norm))

cbs = []
if config.mix_up:
    cbs.append(MixUp(config.mix_up))
cbs.append(WandbCallback(log_model=False, log_preds=False))

learn.fine_tune(config.n_epochs, base_lr=config.learning_rate, cbs=cbs)

wandb.finish()
コード例 #60
0
def step_impl(context, exeption):
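    # behave step: assert that the captured exception is an instance of the class named
    # by 'exeption' (the class object is resolved with eval)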
    assert isinstance(context.exception, eval(exeption)), \
    "Invalid exception %s - expected %s" \
    % (type(context.exception).__name__, exeption)