def bootcd(self):
        "all nodes: invoke GetBootMedium and store result locally"
        utils.header("Calling GetBootMedium for {}".format(self.name()))
        # this would clearly belong in the config but, well ..
        options = self.node_spec['bootmedium_options'] if 'bootmedium_options' in self.node_spec else []
        encoded = self.test_plc.apiserver.GetBootMedium(
            self.test_plc.auth_root(), self.name(), 'node-iso', '', options)
        if encoded == '':
            raise Exception('GetBootmedium failed')

        filename = "{}/{}.iso".format(self.nodedir(), self.name())
        utils.header('Storing boot medium into {}'.format(filename))

        # xxx discovered with python3, but a long standing issue:
        # encoded at this point is a str instead of a bytes
        # Quick & dirty : we convert this explicitly to a bytearray
        # Longer run : clearly it seems like the plcapi server side should
        # tag its result with <base64></base64> rather than as a string
        bencoded = str.encode(encoded)
        if self.dry_run():
            print("Dry_run: skipped writing of iso image")
            return True
        else:
            # with python3 we need base64.decodebytes (decodestring is deprecated)
            with open(filename, 'wb') as storage:
                storage.write(base64.decodebytes(bencoded))
            return True
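# A minimal standalone sketch (not part of the framework above) of the
# str-vs-bytes point made in bootcd(): the API hands back base64 content as a
# str, which has to become bytes before base64 decoding. The payload and
# variable names here are purely illustrative.
import base64

fake_encoded = base64.encodebytes(b"fake iso payload").decode('ascii')  # the str the API would return
raw = base64.decodebytes(fake_encoded.encode('ascii'))                  # str -> bytes, then base64-decode
assert raw == b"fake iso payload"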
    def ssh_tasks(self,options, expected=True, command=None):
#                     timeout_minutes=20, silent_minutes=10, period_seconds=15):
#        timeout  = timedelta(minutes=timeout_minutes)
#        graceout = timedelta(minutes=silent_minutes)
#        period   = timedelta(seconds=period_seconds)
        if not command:
            command = "echo hostname ; hostname; echo id; id; echo uname -a ; uname -a"
        # locate a key
        private_key = self.locate_private_key()
        if not private_key :
            utils.header("WARNING: Cannot find a valid key for slice {}".format(self.name()))
            return False

        if expected:    msg="ssh slice access enabled"
        else:           msg="ssh slice access disabled"
        utils.header("checking for {} -- slice {}".format(msg, self.name()))

        tasks=[]
        slicename=self.name()
        dry_run = getattr(options, 'dry_run', False)
        # convert nodenames to real hostnames
        for nodename in self.slice_spec['nodenames']:
            site_spec, node_spec = self.test_plc.locate_node(nodename)
            tasks.append( CompleterTaskSliceSsh(self.test_plc, node_spec['node_fields']['hostname'],
                                                slicename, private_key, command, expected, dry_run))
        return tasks
Example #3
  def __finish(self):
    tests = self.__gather_tests
    # Normally you don't have to use Fore.BLUE before each line, but 
    # Travis-CI seems to reset color codes on newline (see travis-ci/travis-ci#2692)
    # or stream flush, so we have to ensure that the color code is printed repeatedly
    prefix = Fore.CYAN
    for line in header("Verification Summary", top='=', bottom='').split('\n'):
      print prefix + line
    for test in tests:
      print prefix + "| Test: %s" % test.name
      if test.name in self.results['verify'].keys():
        for test_type, result in self.results['verify'][test.name].iteritems():
          if result.upper() == "PASS":
            color = Fore.GREEN
          elif result.upper() == "WARN":
            color = Fore.YELLOW
          else:
            color = Fore.RED
          print prefix + "|       " + test_type.ljust(11) + ' : ' + color + result.upper()
      else:
        print prefix + "|      " + Fore.RED + "NO RESULTS (Did framework launch?)"
    print prefix + header('', top='', bottom='=') + Style.RESET_ALL

    print "Time to complete: " + str(int(time.time() - self.start_time)) + " seconds"
    print "Results are saved in " + os.path.join(self.result_directory, self.timestamp)
Example #4
    def scp_to_webroot(self, localfiles, recursive = False):
	if self.config.verbose:
	    utils.header("Copying %s to %s webroot" % (localfiles, self['name']), logfile = self.config.logfile)
	self.scp_to("%(localfiles)s" % locals(), "/var/www/html/")
	url = 'http://%s/%s' % (self['ip'], localfiles)	
	
	return url 
 def sfi_configure (self,dir_name):
     plc_spec=self.test_plc.plc_spec
     # cheat a bit: retrieve the global SFA spec from the plc obj
     sfa_spec=self.test_plc.plc_spec['sfa']
     # fetch keys in config spec and expose to sfi
     for spec_name in ['pi_spec','user_spec']:
         user_spec=self.auth_sfa_spec[spec_name]
         user_leaf=user_spec['name']
         key_name=user_spec['key_name']
         key_spec = self.test_plc.locate_key (key_name)
         for (kind,ext) in [ ('private', 'pkey'), ('public', 'pub') ] :
             contents=key_spec[kind]
             file_name=os.path.join(dir_name,self.obj_hrn(user_leaf))+"."+ext
             fileconf=open(file_name,'w')
             fileconf.write (contents)
             fileconf.close()
             utils.header ("(Over)wrote {}".format(file_name))
     #
     file_name=dir_name + os.sep + 'sfi_config'
     fileconf=open(file_name,'w')
     SFI_AUTH=self.auth_hrn()
     fileconf.write ("SFI_AUTH='{}'".format(SFI_AUTH))
     fileconf.write('\n')
     # default is to run as a PI
     SFI_USER=self.obj_hrn(self.auth_sfa_spec['pi_spec']['name'])
     fileconf.write("SFI_USER='******'".format(SFI_USER))
     fileconf.write('\n')
     SFI_REGISTRY='http://{}:{}/'.format(sfa_spec['settings']['SFA_REGISTRY_HOST'], 12345)
     fileconf.write("SFI_REGISTRY='{}'".format(SFI_REGISTRY))
     fileconf.write('\n')
     SFI_SM='http://{}:{}/'.format(sfa_spec['settings']['SFA_AGGREGATE_HOST'], sfa_spec['sfi-connects-to-port'])
     fileconf.write("SFI_SM='{}'".format(SFI_SM))
     fileconf.write('\n')
     fileconf.close()
     utils.header ("(Over)wrote {}".format(file_name))
 def run(self, command, message=None, background=False, dry_run=False):
     local_command = self.actual_command(command, dry_run=dry_run)
     if dry_run:
         utils.header("DRY RUN " + local_command)
         return 0
     else:
         self.header(message)
         return utils.system(local_command, background)
Example #7
    def is_ready(self, timeout=30):
	# Node is considered ready when Node Manager has started and vuseradd processes have stopped
	log = self.config.logfile 
	class test:
	    def __init__(self, name, description, system, cmd, check, inverse = False, logfile = log):
	        self.system = system
		self.cmd = cmd
		self.check = check
		self.name = name
		self.description = description
		self.inverse = inverse
		self.logfile = logfile
 
	    def run(self, logfile, verbose = True):
		if verbose:
		    utils.header(self.description, logfile =  self.logfile)	
	        (status, output) = self.system(self.cmd)
		if self.inverse and output.find(self.check) == -1:
		    if verbose: utils.header("%s Passed Test" % self.name, logfile = self.logfile)
		    return True
		elif not self.inverse and output and output.find(self.check) != -1:
		    if verbose: utils.header("%s Passed Test" % self.name, logfile = self.logfile)
		    return True
		
		if verbose: utils.header("%s Failed Test" % self.name, logfile = self.logfile)
	        return False

	ready = False
	start_time = time.time()
	end_time = start_time + 60 * timeout
	vcheck_cmd = "ps -elfy | grep vuseradd | grep -v grep"  
        grep_cmd = "grep 'Starting Node Manager' %s" % self.logfile.filename
	tests = {
	'1':  test("NodeManager", "Checking if NodeManager has started", utils.commands, grep_cmd, "OK", logfile = self.config.logfile),
	'2':  test("vuseradd", "Checking if vuseradd is done", self.commands, vcheck_cmd, "vuseradd", True, logfile = self.config.logfile)      
	}
	
	while time.time() < end_time and ready == False:
	    # Run tests in order
	    steps = tests.keys()
	    steps.sort()
	    results = {}
	    for step in steps:
		test = tests[step]
		results[step] = result = test.run(self.config.verbose)
		if not result: break
		        	   	 	
	    # Check results. We are ready if all passed 		
	    if not set(results.values()).intersection([False, None]):
		utils.header("%s is ready" % (self['hostname'])) 
		ready = True
	    else:
		if self.config.verbose:
		    utils.header("%s not ready. Waiting 30 seconds. %s seconds left" % \
				 (self['hostname'], int(end_time - time.time())) )
		time.sleep(30)   			

	return ready  
 def delete_nodes (self):
     auth = self.owner_auth()
     slice_name = self.slice_name()
     print('retrieving slice {}'.format(slice_name))
     slice=self.test_plc.apiserver.GetSlices(auth, slice_name)[0]
     node_ids=slice['node_ids']
     utils.header ("Deleting {} nodes from slice {}"\
                   .format(len(node_ids), slice_name))
     self.test_plc.apiserver.DeleteSliceFromNodes (auth, slice_name, node_ids)
 def host_box(self):
     if self.is_real():
         return 'localhost'
     else:
         try:
             return self.node_spec['host_box']
         except:
             utils.header("WARNING : qemu nodes need a host box")
             return 'localhost'
 def kill_qemu(self):
     #Prepare the log file before killing the nodes
     test_box = self.test_box()
     # kill the right processes 
     utils.header("Stopping qemu for node {} on box {}"\
                  .format(self.name(), self.test_box().hostname()))
     command = "{}/qemu-kill-node {}".format(self.nodedir(),self.name())
     self.test_box().run_in_buildname(command, dry_run=self.dry_run())
     return True
 def qemu_start(self):
     "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
     model = self.node_spec['node_fields']['model']
     # start the qemu instance only when this is a qemu node
     if self.is_qemu():
         self.start_qemu()
     else:
         utils.header("TestNode.qemu_start : {} model {} taken as real node"\
                      .format(self.name(), model))
     return True
    def start_qemu(self):
        test_box = self.test_box()
        utils.header("Starting qemu node {} on {}".format(self.name(), test_box.hostname()))

        test_box.run_in_buildname("{}/qemu-bridge-init start >> {}/log.txt"\
                                  .format(self.nodedir(), self.nodedir()),
                                  dry_run=self.dry_run())
        # kick it off in background, as it would otherwise hang
        test_box.run_in_buildname("{}/qemu-start-node 2>&1 >> {}/log.txt"\
                                  .format(self.nodedir(), self.nodedir()))
 def add_nodes (self):
     auth = self.owner_auth()
     slice_name = self.slice_name()
     hostnames=[]
     for nodename in self.slice_spec['nodenames']:
         node_spec=self.test_site.locate_node(nodename)
         test_node=TestNode(self.test_plc, self.test_site, node_spec)
         hostnames += [test_node.name()]
     utils.header("Adding {} in {}".format(hostnames, slice_name))
     self.test_plc.apiserver.AddSliceToNodes(auth, slice_name, hostnames)
    def __run_tests(self, tests):
        if len(tests) == 0:
            return 0

        logging.debug("Start __run_tests.")
        logging.debug("__name__ = %s",__name__)

        error_happened = False
        if self.os.lower() == 'windows':
            logging.debug("Executing __run_tests on Windows")
            for test in tests:
                with open(self.current_benchmark, 'w') as benchmark_resume_file:
                    benchmark_resume_file.write(test.name)
                with self.quiet_out.enable():
                    if self.__run_test(test) != 0:
                        error_happened = True
        else:
            logging.debug("Executing __run_tests on Linux")

            # Setup a nice progressbar and ETA indicator
            widgets = [self.mode, ': ',  progressbar.Percentage(),
                       ' ', progressbar.Bar(),
                       ' Rough ', progressbar.ETA()]
            pbar = progressbar.ProgressBar(widgets=widgets, maxval=len(tests)).start()
            pbar_test = 0

            # These features do not work on Windows
            for test in tests:
                pbar.update(pbar_test)
                pbar_test = pbar_test + 1
                if __name__ == 'benchmark.benchmarker':
                    print header("Running Test: %s" % test.name)
                    with open(self.current_benchmark, 'w') as benchmark_resume_file:
                        benchmark_resume_file.write(test.name)
                    with self.quiet_out.enable():
                        test_process = Process(target=self.__run_test, name="Test Runner (%s)" % test.name, args=(test,))
                        test_process.start()
                        test_process.join(self.run_test_timeout_seconds)
                    self.__load_results()  # Load intermediate result from child process
                    if(test_process.is_alive()):
                        logging.debug("Child process for {name} is still alive. Terminating.".format(name=test.name))
                        self.__write_intermediate_results(test.name,"__run_test timeout (="+ str(self.run_test_timeout_seconds) + " seconds)")
                        test_process.terminate()
                        test_process.join()
                    if test_process.exitcode != 0:
                        error_happened = True
            pbar.finish()

        if os.path.isfile(self.current_benchmark):
            os.remove(self.current_benchmark)
        logging.debug("End __run_tests.")

        if error_happened:
            return 1
        return 0
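# A minimal standalone sketch of the per-test subprocess pattern used in
# __run_tests above: run the work in a multiprocessing.Process, wait with a
# timeout, and terminate it if it is still alive. The sleep time and the
# 1-second timeout are illustrative values only.
import time
from multiprocessing import Process

def slow_work():
    time.sleep(5)

if __name__ == '__main__':
    worker = Process(target=slow_work, name="Test Runner (example)")
    worker.start()
    worker.join(timeout=1)           # wait at most 1 second
    if worker.is_alive():
        worker.terminate()           # same recovery path as the timeout branch above
        worker.join()
    print("exit code:", worker.exitcode)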
 def qemu_export(self):
     "all nodes: push local node-dep directory on the qemu box"
     # if relevant, push the qemu area onto the host box
     if self.test_box().is_local():
         return True
     dry_run = self.dry_run()
     utils.header("Cleaning any former sequel of {} on {}"\
                  .format(self.name(), self.host_box()))
     utils.header("Transferring configuration files for node {} onto {}"\
                  .format(self.name(), self.host_box()))
     return self.test_box().copy(self.nodedir(), recursive=True, dry_run=dry_run) == 0
Example #16
 def is_local_hostname(hostname):
     if hostname == "localhost":
         return True
     import socket
     try:
         local_ip = socket.gethostbyname(socket.gethostname())
         remote_ip = socket.gethostbyname(hostname)
         return local_ip == remote_ip
     except:
         utils.header("WARNING : something wrong in is_local_hostname with hostname={}".format(hostname))
         return False
Example #17
    def update_api(self):
	# Set up API access
        # If plc is specified, find its configuration
        # and use its API
	self.update_ip()
	name, ip, port, path = self['name'], self['ip'], self['port'], self['api_path']
	if self.config.verbose:
	    utils.header("Updating %(name)s's api to https://%(ip)s:%(port)s/%(path)s" % locals(), logfile = self.config.logfile)   
	api_server = "https://%(ip)s:%(port)s/%(path)s" % locals()
	self.config.api = xmlrpclib.Server(api_server, allow_none = 1)
        self.config.api_type = 'xmlrpc'	
 def create_user (self):
     user_spec = self.user_spec
     fields = user_spec['user_fields']
     auth = self.test_plc.auth_root()
     utils.header('Adding user {} - roles {}'.format(fields['email'], user_spec['roles']))
     self.test_plc.apiserver.AddPerson(auth, fields)
     self.test_plc.apiserver.UpdatePerson(auth, fields['email'], {'enabled': True})
     for role in user_spec['roles']:
         self.test_plc.apiserver.AddRoleToPerson(auth,role,fields['email'])
     self.test_plc.apiserver.AddPersonToSite(auth,
                                             self.name(),
                                             self.test_site.name())
Example #19
 def store_key(self):
     pub = self.publicpath()
     priv = self.privatepath()
     utils.header("Storing key {} in {}".format(self.name(), pub))
     dir = os.path.dirname(pub)
     if not os.path.isdir(dir):
         os.mkdir(dir)
     with open(pub,"w") as f:
         f.write(self.key_spec['key_fields']['key'])
     with open(priv,"w") as f:
         f.write(self.key_spec['private'])
     os.chmod(priv,0o400)
     os.chmod(pub,0o444)
 def sfa_check_slice_plc (self, options):
     "check the slice has been created at the plc - all local nodes should be in slice"
     slice = self.test_plc.apiserver.GetSlices(self.test_plc.auth_root(), self.plc_name())[0]
     nodes = self.test_plc.apiserver.GetNodes(self.test_plc.auth_root(), {'peer_id':None})
     result = True
     for node in nodes: 
         if node['node_id'] in slice['node_ids']:
             utils.header("local node {} found in slice {}".format(node['hostname'], slice['name']))
         else:
             utils.header("ERROR - local node {} NOT FOUND in slice {}"\
                          .format(node['hostname'], slice['name']))
             result = False
     return result
    def create_node(self):
        ownername = self.node_spec['owner']
        user_spec = self.test_site.locate_user(ownername)
        test_user = TestUser(self.test_plc,self.test_site,user_spec)
        userauth = test_user.auth()
        utils.header("node {} created by user {}".format(self.name(), test_user.name()))
        rootauth = self.test_plc.auth_root()
        server  =  self.test_plc.apiserver
        node_id = server.AddNode(userauth,
                                 self.test_site.site_spec['site_fields']['login_base'],
                                 self.node_spec['node_fields'])
        # create as reinstall to avoid user confirmation
        server.UpdateNode(userauth, self.name(), { 'boot_state' : 'reinstall' })

        # you are supposed to make sure the tags exist
        for tagname, tagvalue in self.node_spec['tags'].items():
            server.AddNodeTag(userauth, node_id, tagname, tagvalue)
            
        if not self.test_plc.has_addresses_api():
#            print 'USING OLD INTERFACE'
            # populate network interfaces - primary
            server.AddInterface(userauth, self.name(),
                                self.node_spec['interface_fields'])
        else:
#            print 'USING NEW INTERFACE with separate ip addresses'
            # this is for setting the 'dns' stuff that now goes with the node
            server.UpdateNode(userauth, self.name(), self.node_spec['node_fields_nint'])
            interface_id = server.AddInterface(userauth, self.name(),self.node_spec['interface_fields_nint'])
            server.AddIpAddress(userauth, interface_id, self.node_spec['ipaddress_fields'])
            route_fields = self.node_spec['route_fields']
            route_fields['interface_id'] = interface_id
            server.AddRoute(userauth, node_id, self.node_spec['route_fields'])
            pass
        # populate network interfaces - others
        if 'extra_interfaces' in self.node_spec:
            for interface in self.node_spec['extra_interfaces']:
                server.AddInterface(userauth, self.name(), interface['interface_fields'])
                if 'settings' in interface:
                    for attribute, value in interface['settings'].items():
                        # locate node network
                        interface = server.GetInterfaces( userauth,
                                                          {'ip' : interface['interface_fields']['ip']})[0]
                        interface_id = interface['interface_id']
                        # locate or create node network attribute type
                        try:
                            interface_tagtype = server.GetTagTypes(userauth, {'name' : attribute})[0]
                        except:
                            interface_tagtype = server.AddTagType(rootauth,{'category' : 'test',
                                                                            'tagname' : attribute})
                        # attach value
                        server.AddInterfaceTag(userauth, interface_id, attribute, value)
Example #22
    def wget(self, url, targetdir, user = 'root'):
        if self.config.verbose:
            utils.header("Downloading %(url)s to %(targetdir)s" % locals())

        cmd_prefix = ""
        if user not in ['root']:
            cmd_prefix = " su - user -c "
        fileparts = url.split(os.sep)
        filename = fileparts[-1:][0]
        cleanup_cmd = "%(cmd_prefix)s rm -f %(targetdir)s/%(filename)s" % locals()
        print >> self.logfile, cleanup_cmd
        self.commands(cleanup_cmd, False)

        wget_cmd = "%(cmd_prefix)s wget -nH -P %(targetdir)s %(url)s" % locals()
        print >> self.logfile, wget_cmd
        self.commands(wget_cmd)
Example #23
 def copy_abs(self, local_file, remote_file,
              recursive=False, dry_run=False):
     if self.is_local():
         dest = ""
     else:
         dest = "{}:".format(self.hostname_part())
     scp_command = "scp "
     scp_command += TestSsh.std_options
     if recursive:
         scp_command += "-r "
     scp_command += self.key_part()
     scp_command += "{} {}{}".format(local_file, dest, remote_file)
     if dry_run:
         utils.header("DRY RUN TestSsh.copy {}".format(scp_command))
         # need to be consistent with the non-dry-run mode
         return 0
     return utils.system(scp_command)
Example #24
    def archive_scripts(self, prefix):
	valid_prefix = ['slice', 'node'] 
	if prefix not in valid_prefix:
	    raise "Error. Invalid prefix %s. Must be in %s" %  (prefix, valid_prefix)

	scripts_dir = self.path + os.sep + 'tests' +os.sep + prefix + os.sep
	workdir = '/tmp' + os.sep	
	archive_path = workdir + os.sep + prefix + os.sep
	archive_filename = prefix + ".tar.gz"
  
	if self.verbose:
	    utils.header("Creating/Updating %s archive %s" % (prefix, archive_path + archive_filename), logfile = self.logfile)
	utils.commands("mkdir -p %(archive_path)s" % locals(), logfile = self.logfile) 	
	utils.commands("cp -Rf %(scripts_dir)s* %(archive_path)s" % locals(), logfile = self.logfile)
	tar_cmd = "cd %(workdir)s && tar -czf %(workdir)s/%(archive_filename)s %(prefix)s" % locals() 
	utils.commands(tar_cmd, logfile = self.logfile)
	return (archive_filename, workdir+archive_filename) 
Example #25
 def copy(self, local_file, recursive=False, dry_run=False):
     if self.is_local():
         return 0
     self.create_buildname_once(dry_run)
     scp_command = "scp "
     if not dry_run:
         scp_command += TestSsh.std_options
     if recursive:
         scp_command += "-r "
     scp_command += self.key_part()
     scp_command += "{} {}:{}/{}".format(local_file, self.hostname_part(),
                                         self.fullname(self.buildname),
                                         os.path.basename(local_file) or ".")
     if dry_run:
         utils.header("DRY RUN TestSsh.copy {}".format(scp_command))
         # need to be consistent with the non-dry-run mode
         return 0
     return utils.system(scp_command)
 def run (self, timeout_timedelta, silent_timedelta, period):
     begin = datetime.now()
     timeout = begin+timeout_timedelta
     timeout_seconds = timeout_timedelta.total_seconds()
     timeout_minutes = timeout_seconds/60
     graceout = datetime.now()+silent_timedelta
     silent_seconds = silent_timedelta.total_seconds()
     silent_minutes = silent_seconds/60
     period_seconds = int(period.total_seconds())
     if self.verbose:
         if timeout_seconds >= 120:
             utils.header("Completer [{} tasks]: max timeout is {} minutes, "
                          "silent for {} minutes (period is {} s)"\
                          .format(len(self.tasks), timeout_minutes,
                                  silent_minutes, period_seconds))
         else:
             utils.header("Completer [{} tasks]: max timeout is {} seconds, "
                          "silent for {} seconds (period is {} s)"\
                          .format(len(self.tasks), timeout_seconds,
                                  silent_seconds, period_seconds))
     tasks = self.tasks
     while tasks:
         fine = []
         for task in tasks:
             success = task.run (silent=datetime.now() <= graceout)
             if success:
                 fine.append(task)
         for task in fine:
             tasks.remove(task)
         if not tasks:
             if self.verbose:
                 duration = datetime.now() - begin
                 print("total completer {} {}s".format(self.message,
                                                       int(duration.total_seconds())))
             return True
         if datetime.now() > timeout:
             for task in tasks: 
                 task.failure_epilogue()
             return False
         if self.verbose:
             print('{}s..'.format(period_seconds), end=' ')
         time.sleep(period_seconds)
     # in case we're empty 
     return True
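# A hedged usage sketch of the task protocol that Completer.run() relies on:
# each task only needs a run(silent=...) method returning truthy on success,
# plus a failure_epilogue() called when the overall timeout expires.
# DummyTask and the numbers below are made up for illustration.
class DummyTask:
    def __init__(self, succeed_after):
        self.succeed_after = succeed_after   # number of polls before success
        self.calls = 0
    def run(self, silent):
        self.calls += 1
        return self.calls >= self.succeed_after
    def failure_epilogue(self):
        print("task never completed")

task = DummyTask(2)
print(task.run(silent=True))    # False on the first poll
print(task.run(silent=True))    # True on the second poll
# With the framework's Completer this would read, as in ssh_slice_sfa below:
# Completer([DummyTask(2), DummyTask(3)], message='dummy').run(timeout, graceout, period)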
    def verify_type(test_type):
      with open(os.path.join(verificationPath, (test_type + '.txt')), 'w') as verification:
        test = self.runTests[test_type]
        test.setup_out(verification)
        verification.write(header("VERIFYING %s" % test_type.upper()))

        base_url = "http://%s:%s" % (self.benchmarker.server_host, self.port)

        try:
          results = test.verify(base_url)
        except ConnectionError as e:
          results = [('fail',"Server did not respond to request", base_url)]
          logging.warning("Verifying test %s for %s caused an exception: %s", test_type, self.name, e)
        except Exception as e:
          results = [('fail',"""Caused Exception in TFB
            This almost certainly means your return value is incorrect,
            but also that you have found a bug. Please submit an issue
            including this message: %s\n%s""" % (e, traceback.format_exc()),
            base_url)]
          logging.warning("Verifying test %s for %s caused an exception: %s", test_type, self.name, e)
          traceback.format_exc()

        test.failed = any(result == 'fail' for (result, reason, url) in results)
        test.warned = any(result == 'warn' for (result, reason, url) in results)
        test.passed = all(result == 'pass' for (result, reason, url) in results)

        def output_result(result, reason, url):
          specific_rules_url = "http://frameworkbenchmarks.readthedocs.org/en/latest/Project-Information/Framework-Tests/#specific-test-requirements"
          color = Fore.GREEN
          if result.upper() == "WARN":
            color = Fore.YELLOW
          elif result.upper() == "FAIL":
            color = Fore.RED

          verification.write(("   " + color + "%s" + Style.RESET_ALL + " for %s\n") % (result.upper(), url))
          print ("   " + color + "%s" + Style.RESET_ALL + " for %s\n") % (result.upper(), url)
          if reason is not None and len(reason) != 0:
            for line in reason.splitlines():
              verification.write("     " + line + '\n')
              print "     " + line
            if not test.passed:
              verification.write("     See %s\n" % specific_rules_url)
              print "     See %s\n" % specific_rules_url

        [output_result(r1,r2,url) for (r1, r2, url) in results]

        if test.failed:
          self.benchmarker.report_verify_results(self, test_type, 'fail')
        elif test.warned:
          self.benchmarker.report_verify_results(self, test_type, 'warn')
        elif test.passed:
          self.benchmarker.report_verify_results(self, test_type, 'pass')
        else:
          raise Exception("Unknown error - test did not pass,warn,or fail")

        verification.flush()
Example #29
def analysis():
    """PDB Tools menu"""
    utils.header(__VERSION__)
    print("""
1) Proximity Analysis           (PDB)
2) Catalytic Pocket Selector    (PDB)

b) Back to Main Menu
q) Exit
    """)
    choice = input("\nSelection:\t")
    if choice == "1":
        nei_fin.main()
    elif choice == "2":
        pro_pru.main()
    elif choice == "b":
        main()
    elif choice == "q":
        sys.exit()
Example #30
	    def run(self, logfile, verbose = True):
		if verbose:
		    utils.header(self.description, logfile =  self.logfile)	
	        (status, output) = self.system(self.cmd)
		if self.inverse and output.find(self.check) == -1:
		    if verbose: utils.header("%s Passed Test" % self.name, logfile = self.logfile)
		    return True
		elif not self.inverse and output and output.find(self.check) != -1:
		    if verbose: utils.header("%s Passed Test" % self.name, logfile = self.logfile)
		    return True
		
		if verbose: utils.header("%s Failed Test" % self.name, logfile = self.logfile)
	        return False
    def run(self):
        ##########################
        # Generate metadata
        ##########################
        self.run_list_test_metadata()
        ##########################
        # Get a list of all known
        # tests that we can run.
        ##########################
        all_tests = self.__gather_tests
        ##########################
        # Setup client/server
        ##########################
        print(header("Preparing Server, Database, and Client ...", top='=', bottom='='))
        with self.quiet_out.enable():
            self.__setup_server()
            self.__setup_database()
            self.__setup_client()

        ## Check if wrk (and wrk-pipeline) is installed and executable, if not, raise an exception
        #if not (os.access("/usr/local/bin/wrk", os.X_OK) and os.access("/usr/local/bin/wrk-pipeline", os.X_OK)):
        #  raise Exception("wrk and/or wrk-pipeline are not properly installed. Not running tests.")

        ##########################
        # Run tests
        ##########################
        print(header("Running Tests...", top='=', bottom='='))
        result = self.__run_tests(all_tests)

        ##########################
        # Parse results
        ##########################
        if self.mode == "benchmark":
            print(header("Parsing Results ...", top='=', bottom='='))
            self.__parse_results(all_tests)


        self.__set_completion_time()
        self.__upload_results()
        self.__finish()
        return result
 def ssh_slice_sfa(self, options, timeout_minutes=40, silent_minutes=0, period_seconds=15):
     "tries to ssh-enter the SFA slice"
     timeout  = timedelta(minutes=timeout_minutes)
     graceout = timedelta(minutes=silent_minutes)
     period   = timedelta(seconds=period_seconds)
     # locate a key
     private_key=self.locate_private_key()
     if not private_key :
         utils.header("WARNING: Cannot find a valid key for slice {}".format(self.hrn()))
         return False
     command="echo hostname ; hostname; echo id; id; echo uname -a ; uname -a"
     
     tasks = []
     slicename = self.plc_name()
     dry_run = getattr(options,'dry_run',False)
     for nodename in self.slice_spec['nodenames']:
         (site_spec,node_spec) = self.test_plc.locate_node(nodename)
         tasks.append( CompleterTaskSliceSsh(self.test_plc, node_spec['node_fields']['hostname'],
                                             slicename, private_key, command,
                                             expected=True, dry_run=dry_run))
     return Completer (tasks, message='ssh_slice_sfa').run(timeout, graceout, period)
Example #33
  def run(self):
    ##########################
    # Get a list of all known
    # tests that we can run.
    ##########################    
    all_tests = self.__gather_tests

    ##########################
    # Setup client/server
    ##########################
    print header("Preparing Server, Database, and Client ...", top='=', bottom='=')
    self.__setup_server()
    self.__setup_database()
    self.__setup_client()

    ## Check if wrk (and wrk-pipeline) is installed and executable, if not, raise an exception
    #if not (os.access("/usr/local/bin/wrk", os.X_OK) and os.access("/usr/local/bin/wrk-pipeline", os.X_OK)):
    #  raise Exception("wrk and/or wrk-pipeline are not properly installed. Not running tests.")

    ##########################
    # Run tests
    ##########################
    print header("Running Tests...", top='=', bottom='=')
    result = self.__run_tests(all_tests)

    ##########################
    # Parse results
    ##########################  
    if self.mode == "benchmark":
      print header("Parsing Results ...", top='=', bottom='=')
      self.__parse_results(all_tests)

    self.__finish()
    return result
Example #34
def main():
    utils.header(__VERSION__)
    print("""
1) Trajectory Manipulation
2) PDB Tools
3) Others
4) Molecular properties and vibrations

q) Exit
    """)

    choice = input("\nSelection:\t")
    if choice == "1":
        data_manipulation()
    elif choice == "2":
        analysis()
    elif choice == "3":
        input_generation()
    elif choice == "4":
        molecule_interface()
    elif choice == "q":
        sys.exit()
def main():
    load_bank_data()
    cleaner()
    header()
    account_auth = auth_account()

    if account_auth:
        cleaner()
        header()
        welcome(account_auth)

        while True:
            option_typed = get_menu_options_typed(account_auth)

            do_operation(option_typed, account_auth)

            print()
            loop_typed = input('Deseja sair da conta S/n? ')
            if not loop_typed == 'n':
                print('Bye')
                break
    else:
        print('Conta inválida')
 def __stop_test(self, test, out):
     # self.__process may not be set if the user hit ctrl+c prior to the test
     # starting properly.
     if self.__process is not None:
         out.write(header("Stopping %s" % test.name))
         out.flush()
         # Ask TFBReaper to nicely terminate itself
         self.__process.terminate()
         slept = 0
         returnCode = None
         # Check once a second to see if TFBReaper has exited
         while(slept < 30 and returnCode is None):
             time.sleep(1)
             slept += 1
             returnCode = self.__process.poll()
         
         # If TFBReaper has not exited at this point, we have a problem
         if returnCode is None:
             self.__write_intermediate_results(test.name, "port " + str(test.port) + " was not released by stop")
             out.write(header("Error: Port %s was not released by stop - %s" % (test.port, test.name)))
             out.write(header("Running Processes"))
             out.write(subprocess.check_output(['ps -aux'], shell=True))
             out.flush()
             return exit_with_code(1)
        def verify_type(test_type):

            test = self.runTests[test_type]
            test.setup_out_err(out, err)
            out.write(header("VERIFYING %s" % test_type.upper()))

            base_url = "http://%s:%s" % (self.benchmarker.server_host,
                                         self.port)

            try:
                results = test.verify(base_url)
            except Exception as e:
                results = [('fail', """Caused Exception in TFB
          This almost certainly means your return value is incorrect, 
          but also that you have found a bug. Please submit an issue
          including this message: %s\n%s""" % (e, traceback.format_exc()),
                            base_url)]
                logging.warning(
                    "Verifying test %s for %s caused an exception: %s",
                    test_type, self.name, e)
                traceback.format_exc()

            test.failed = any(result == 'fail'
                              for (result, reason, url) in results)
            test.warned = any(result == 'warn'
                              for (result, reason, url) in results)
            test.passed = all(result == 'pass'
                              for (result, reason, url) in results)

            def output_result(result, reason, url):
                out.write("   %s for %s\n" % (result.upper(), url))
                print "   %s for %s" % (result.upper(), url)
                if reason is not None and len(reason) != 0:
                    for line in reason.splitlines():
                        out.write("     " + line + '\n')
                        print "     " + line

            [output_result(r1, r2, url) for (r1, r2, url) in results]

            if test.failed:
                self.benchmarker.report_verify_results(self, test_type, 'fail')
            elif test.warned:
                self.benchmarker.report_verify_results(self, test_type, 'warn')
            elif test.passed:
                self.benchmarker.report_verify_results(self, test_type, 'pass')
            else:
                raise Exception(
                    "Unknown error - test did not pass,warn,or fail")
def text_generator(seed,
                   unconditional=False,
                   nsamples=1,
                   batch_size=-1,
                   length=-1,
                   temperature=0.7,
                   top_k=40):

    enc = get_encoder()
    context_tokens = enc.encode(seed)

    if batch_size == -1:
        batch_size = 1
    assert nsamples % batch_size == 0

    if length == -1:
        length = config.n_ctx // 2
    elif length > config.n_ctx:
        raise ValueError("Can't get samples longer than window size: %s" %
                         config.n_ctx)

    out = sample_sequence(
        model=model,
        length=length,
        context=context_tokens if not unconditional else None,
        start_token=enc.encoder['<|endoftext|>'] if unconditional else None,
        batch_size=batch_size,
        temperature=temperature,
        top_k=top_k,
        device=device)

    text = ''

    out = out[:, len(context_tokens):].tolist()
    for i in range(batch_size):
        text += enc.decode(out[i])

    html = ''
    html = add_content(
        html, header('Input Seed ', color='black', gen_text='Network Output'))
    html = add_content(html, box(seed, text))
    return f'<div>{html}</div>'
Example #39
def login():
    if "username" in session and session["username"] != "":
        return "<script>window.location='/'</script>"  #FIX

    header = utils.header("")
    footer = utils.footer()

    if request.method == 'POST':
        return_code = user_actions.login(request.form["username"],
                                         request.form["password"])
        if return_code == 1:
            session["username"] = request.form["username"]
            return "<script>window.location='/'</script>"  #FIX
        else:
            return render_template("login.html",
                                   return_code=return_code,
                                   header=header,
                                   footer=footer)
    else:
        return render_template("login.html", header=header, footer=footer)
Example #40
def generate_output(s, words_to_generate=50, diversity=0.75):
    """Generate output from a sequence"""
    # Mapping of words to integers
    word_idx = json.load(open('../data/word-index.json'))
    idx_word = {idx: word for word, idx in word_idx.items()}

    # Original formated text
    start = format_sequence(s).split()
    gen = []
    s = start[:]

    with graph.as_default():

        # Generate output
        for i in range(words_to_generate):
            # Convert to array
            x = np.array([word_idx.get(word, 0) for word in s]).reshape(
                (1, -1))

            # Make predictions
            preds = model.predict(x)[0].astype(float)

            # Diversify
            preds = np.log(preds) / diversity
            exp_preds = np.exp(preds)
            # Softmax
            preds = exp_preds / np.sum(exp_preds)

            # Pick next index
            next_idx = np.argmax(np.random.multinomial(1, preds, size=1))
            s.append(idx_word[next_idx])
            gen.append(idx_word[next_idx])

    # Formatting in html
    start = remove_spaces(' '.join(start)) + ' '
    gen = remove_spaces(' '.join(gen))
    html = ''
    html = addContent(
        html, header('Input Seed ', color='black', gen_text='Network Output'))
    html = addContent(html, box(start, gen))
    return html
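# A standalone numpy sketch of the diversity (temperature) re-weighting used in
# generate_output above: a low diversity value sharpens the distribution toward
# its argmax, a high value flattens it. The toy probabilities are illustrative.
import numpy as np

def diversify(preds, diversity=0.75):
    preds = np.asarray(preds, dtype=float)
    preds = np.log(preds) / diversity
    exp_preds = np.exp(preds)
    return exp_preds / np.sum(exp_preds)   # re-normalized softmax

toy = [0.6, 0.3, 0.1]
print(diversify(toy, 0.2))   # nearly one-hot on the most likely word
print(diversify(toy, 2.0))   # much flatter distribution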
Example #41
import pandas as pd
from utils import wdir,ddir,header,normalize_txt

df = pd.read_csv(ddir+'rayon.csv',sep=';')
df['txt'] = df.Categorie1_Name+' '+df.Categorie2_Name+' '+df.Categorie3_Name
df.txt = df.apply(lambda r:normalize_txt(r.txt),axis=1)
dfrayon = df
wrayon = set((' '.join(list(dfrayon.txt))).split())

df = pd.read_csv(ddir+'training_shuffled_normed.csv',sep=';',names=header())
df['txt'] = (df.Marque+' ')*3+(df.Libelle+' ')*2+df.Description*1
dftrain = df
wtrain = set((' '.join(list(dftrain.txt))).split())

df = pd.read_csv(ddir+'test_normed.csv',sep=';',names=header(test=True))
df['txt'] = (df.Marque+' ')*3+(df.Libelle+' ')*2+df.Description*1
dftest = df
wtest = set((' '.join(list(dftest.txt))).split())

df = pd.read_csv(ddir+'validation_normed.csv',sep=';',names=header())
df['txt'] = (df.Marque+' ')*3+(df.Libelle+' ')*2+df.Description*1
dfvalid = df
wvalid = set((' '.join(list(dfvalid.txt))).split())

Example #42
# create sample set 
# from training set
#####################

# NOTE : USE analyse_test.py and perfect_sampling.py to get perfect training & validation set
# NOTE : USE analyse_test.py and perfect_sampling.py to get perfect training & validation set
# NOTE : USE analyse_test.py and perfect_sampling.py to get perfect training & validation set
# NOTE : USE analyse_test.py and perfect_sampling.py to get perfect training & validation set

#######################
# training
# stage1 : Categorie1 
# stage3 : Categorie3|Categorie1
#######################

dftrain = pd.read_csv(ddir+'training_perfect_200.csv',sep=';',names = header()).fillna('')
dfvalid = pd.read_csv(ddir+'validation_perfect.csv',sep=';',names = header()).fillna('')
dftest = pd.read_csv(ddir+'test_normed.csv',sep=';',names = header(test=True)).fillna('')

add_txt(dftrain)
add_txt(dfvalid)
add_txt(dftest)

dftrain = dftrain[['Categorie3','Categorie1','txt']]
dfvalid = dfvalid[['Categorie3','Categorie1','txt']]
dftest = dftest[['Identifiant_Produit','txt']]

# training stage1

dt = -time.time()
sct,scv = training_stage1(dftrain,dfvalid)
Example #43
        utils.header(__VERSION__)
        list_molecules()
        input("\nPress enter to return to the menu ...   ")
        molecule_interface()
    elif choice == "b":
        main()
    elif choice == "q":
        sys.exit()


MOL_LIST = []

if len(sys.argv) == 1:
    main()
else:
    utils.header(__VERSION__)
    if sys.argv[1].split(".")[-1] == "interp":
        print(
            "\nQUICK MODE:\tDetected .interp file. Opening as fast as I can..."
        )
        neb_vis.main(sys.argv[1])
    elif sys.argv[1].split(".")[-1] == "out":
        print("\nQUICK MODE:\tDetected .out file. Opening as fast as I can...")
        out_sum.main(sys.argv[1])
    elif sys.argv[1] == "load":
        print(
            "\nQUICK MODE:\tDetected load instruction.\nLoading .xyz files...")
        print("------------------------------------------------")
        for i, argument in enumerate(sys.argv):
            if i > 1 and argument.split(".")[-1] == "xyz":
                MOL_LIST.append(mol.MOL(argument))
Example #44
    if Xv.shape[0]==0:
        scv = -1
    else:
        scv = cla.score(Xv,Yv)
    print 'Stage 3.'+str(i)+':',cat,'score',sct,scv
    joblib.dump((labels,cla),fname)
    del cla
    return (sct,scv)

#################################################
# SAMPLING START HERE
#################################################


print 'loading...'
dftrain = pd.read_csv(ddir+'training_shuffled_normed.csv',sep=';',names = header()).fillna('')
print 'txting...'
add_txt(dftrain)

dfsample = get_sample(dftrain)
dfsample = dfsample[['Identifiant_Produit','Categorie3','Categorie1','txt']]

dfsample.to_csv(ddir+'training_sup9.csv',sep=';',index=False,header=False)

Y = dfsample.Categorie3.values
ID = dfsample.Identifiant_Produit.values
print 'vectorizing...'
vec,X = vectorizer(dfsample.txt)
print 'dumping...'
joblib.dump((vec,ID,X,Y),ddir+'joblib/vecIDXY')
Example #45
def molecule_interface():
    """Molecule class menu"""
    def ask_ID():
        ID = int(
            input("Select the molecule ID (from 1 to " + str(len(MOL_LIST)) +
                  "):\t"))
        if ID < 1 or ID > len(MOL_LIST):
            print("ERROR: The selected ID does not exist")
            molecule_interface()
        print("Selected ID: " + str(ID) + "\tMolecule name: " +
              str(MOL_LIST[ID - 1].name) + "\n")
        return ID

    def list_molecules():
        print("\n            LIST OF LOADED MOLECULES")
        print("------------------------------------------------\n")
        print("ID\tNAME\n")
        for i, myclass in enumerate(MOL_LIST):
            print(str(i + 1) + "\t" + myclass.name)

    utils.header(__VERSION__)
    print("""
1) Load molecule from .xyz file
2) Load vibrational data from .hess file (ORCA)
3) Plot IR spectra
4) Compute properties
5) Linear transit

l) List loaded molecules

b) Back to Main Menu
q) Exit
    """)
    choice = input("\nSelection:\t")
    if choice == "1":
        utils.header(__VERSION__)
        path = input("\nSelect the path of the .xyz file:\t")
        MOL_LIST.append(mol.MOL(path))
        print("\nMolecule saved with the ID:\t" + str(len(MOL_LIST)))
        input("\nPress enter to return to the menu ...   ")
        molecule_interface()
    elif choice == "2":
        utils.header(__VERSION__)
        path = input("\nSelect the path of the .hess file:\t")
        ID = ask_ID()
        MOL_LIST[ID - 1].load_hess_file(path, verbose=True)
        input("\nPress enter to return to the menu ...   ")
        molecule_interface()
    elif choice == "3":
        utils.clear()
        print("================================================")
        print("               VIBRATIONAL SPECTRUM")
        print("================================================\n")
        ID = ask_ID()
        type_of_plot = input(
            "Select the type of plot (gaussian, lorentzian, bar):\t")
        if type_of_plot == "gaussian" or type_of_plot == "lorentzian":
            width = input("Select the amplitude parameter (in cm^-1): ")
            mol.plot_ir_spectrum(MOL_LIST[ID - 1], width, style=type_of_plot)
            save_state = input("\nDo you want to save the plot (y/n):\t")
            if save_state == "y":
                path = input("Select the path to the destination folder:\t")
                mol.plot_ir_spectrum(MOL_LIST[ID - 1],
                                     width,
                                     style=type_of_plot,
                                     path=path,
                                     show=False)
        else:
            mol.plot_ir_spectrum(MOL_LIST[ID - 1], style=type_of_plot)
            save_state = input("\nDo you want to save the plot (y/n):\t")
            if save_state == "y":
                path = input("Select the path to the destination folder:\t")
                mol.plot_ir_spectrum(MOL_LIST[ID - 1],
                                     style=type_of_plot,
                                     path=path,
                                     show=False)
        molecule_interface()
    elif choice == "4":
        utils.clear()
        print("================================================")
        print("       MOLECULAR PROPERTIES COMPUTATION")
        print("================================================\n")
        print("""
1) General information

b) Back
q) Exit
        """)
        calculation_choice = input("\nSelection:\t")
        if calculation_choice == "1":
            utils.clear()
            print("================================================")
            print("               GENERAL PROPERTIES")
            print("================================================")
            list_molecules()
            print("\n================================================")
            molecule_selection = input("""
Select the ID of the molecules divided by comma
(type "all" if you want all the entries):\t""")
            if molecule_selection == "all":
                molecule_selection = list(range(0, len(MOL_LIST)))
            else:
                molecule_selection = [
                    int(i) for i in molecule_selection.split(",")
                ]
            utils.clear()
            print("================================================")
            print("               GENERAL PROPERTIES")
            print("================================================")
            print("\nSelected molecules: ", molecule_selection)
            for myID in molecule_selection:
                myMol = MOL_LIST[myID - 1]
                print("\n Molecule: " + myMol.name + "\t(ID: " + str(myID) +
                      ")")
                print("------------------------------------------------\n")
                print("Composition:")
                comp_list = myMol.composition()
                print("ATOM\tN\t%MASS")
                for line in comp_list:
                    print(
                        str(line[0]) + "\t" + str(line[1]) + "\t" +
                        str(line[2]))
                print("\n")
                print("Center of mass (Angstrom):")
                print("\tx:\t" + str(myMol.rcm()[0]))
                print("\ty:\t" + str(myMol.rcm()[1]))
                print("\tz:\t" + str(myMol.rcm()[2]))
                print("\nMoment of Inertia (amu*Angstrom**2):")
                inert = myMol.inertia_tensor()
                print("\t{:.3e}\t{:.3e}\t{:.3e}".format(
                    inert[0][0][0], inert[0][0][1], inert[0][0][2]))
                print("\t{:.3e}\t{:.3e}\t{:.3e}".format(
                    inert[0][1][0], inert[0][1][1], inert[0][1][2]))
                print("\t{:.3e}\t{:.3e}\t{:.3e}".format(
                    inert[0][2][0], inert[0][2][1], inert[0][2][2]))
                print("\nEigenvalues:")
                print("\t{:.3e}\t{:.3e}\t{:.3e}".format(
                    inert[1][0], inert[1][1], inert[1][2]))
                print("\nRotor type: {}".format(inert[2]))
            input("\nPress enter to return to the menu ...   ")
            molecule_interface()
        elif calculation_choice == "b":
            molecule_interface()
        elif calculation_choice == "q":
            sys.exit()
    elif choice == "5":
        utils.clear()
        print("================================================")
        print("              LINEAR TRANSIT MODULE")
        print("================================================")
        print("Select the ID for the start and end structures\n")
        ID_start = ask_ID()
        ID_end = ask_ID()
        n_steps = int(input("Select the number of intermediate structures:\t"))
        path = input(
            "Select the destination path (without final extension):\t")
        print('\n')
        mol.rigid_linear_transit(MOL_LIST[ID_start - 1], MOL_LIST[ID_end - 1],
                                 n_steps, path)
        molecule_interface()
    elif choice == "l":
        utils.header(__VERSION__)
        list_molecules()
        input("\nPress enter to return to the menu ...   ")
        molecule_interface()
    elif choice == "b":
        main()
    elif choice == "q":
        sys.exit()
        Xv = vec.transform(dfv.txt)
        Yv = dfv['Categorie3'].values
        scv = cla.score(Xv,Yv)
    print 'training',cat1,'\t\t(',i,') : N=',len(df),'K=',len(labels)
    print 'training',cat1,'\t\t(',i,') : training=',sct,'validation=',scv
    joblib.dump((labels,vec,cla),fname)
    del vec,cla
    return (sct,scv)

#######################
# training
# stage1 : Categorie1 
# stage3 : Categorie3|Categorie1
#######################

dftrain = pd.read_csv(ddir+'training_sample.csv'+ext,sep=';',names = header()).fillna('')
dfvalid = pd.read_csv(ddir+'validation_sample.csv'+ext,sep=';',names = header()).fillna('')
dftest = pd.read_csv(ddir+'test_normed.csv',sep=';',names = header(test=True)).fillna('')

add_txt(dftrain)
add_txt(dfvalid)
add_txt(dftest)

dftrain = dftrain[['Categorie3','Categorie1','txt']]
dfvalid = dfvalid[['Categorie3','Categorie1','txt']]
dftest = dftest[['Identifiant_Produit','txt']]


# training stage1

dt = -time.time()
Example #47
        cla.fit(X,Y)
        scv = (-1,0)
    else:
        # performs a gridsearch
        Xvs = [ vec.transform(dfv.txt) for dfv in dfvs]
        Yvs = [ dfv['Categorie3'].values for dfv in dfvs]
        cla,scv = best_classifier(X,Y,Xvs,Yvs)
    print 'training',cat1,'\t\t(',i,') N=',len(dft),'K=',len(labels),': mean =',scv[0],'dev=',scv[1]
    joblib.dump((labels,vec,cla,scv),fname+ext)
    del vec,cla
    return scv

#################
# prepare train #
#################
dftrain = pd.read_csv(ddir+'training_random.csv'+ext,sep=';',names = header()).fillna('')
add_txt(dftrain)
dftrain = dftrain[['Categorie3','Categorie1','txt']]

#################
# prepare valid #
#################
dfvs = [pd.read_csv(ddir+'validation_random.csv.'+str(i),sep=';',names = header()).fillna('') for i in range(9)]
for i in range(9):
    add_txt(dfvs[i])
    dfvs[i] = dfvs[i][['Identifiant_Produit','Categorie3','Categorie1','txt']]

#################
# prepare test  #
#################
Example #48
# from training set
#####################

# NOTE : reference model is limited to ~1M rows balanced train set with ~4500 unique Categorie3 labels
#
# dftrain = pd.read_csv(ddir+'training_shuffled_normed.csv',sep=';',names = header()).fillna('')
# create_sample(dftrain,'Categorie3',200,10)     #~1M rows
# del dftrain

#######################
# training
# stage1 : Categorie1 
# stage3 : Categorie3|Categorie1
#######################

dftrain = pd.read_csv(ddir+'training_sampled_Categorie3_200.csv',sep=';',names = header()).fillna('')
dfvalid = pd.read_csv(ddir+'validation_normed.csv',sep=';',names = header()).fillna('')
dftest = pd.read_csv(ddir+'test_normed.csv',sep=';',names = header(test=True)).fillna('')

add_txt(dftrain)
add_txt(dfvalid)
add_txt(dftest)

dftrain = dftrain[['Categorie3','Categorie1','txt']]
dfvalid = dfvalid[['Categorie3','Categorie1','txt']]
dftest = dftest[['Identifiant_Produit','txt']]

# training stage1

dt = -time.time()
vec,sct,scv = training_stage1(dftrain,dfvalid)
    def __run_test(self, test):

        # Used to capture return values
        def exit_with_code(code):
            if self.os.lower() == 'windows':
                return code
            else:
                sys.exit(code)

        logDir = os.path.join(self.full_results_directory(), test.name.lower())
        try:
            os.makedirs(logDir)
        except Exception:
            pass
        with open(os.path.join(logDir, 'out.txt'), 'w') as out:

            if test.os.lower() != self.os.lower() or test.database_os.lower(
            ) != self.database_os.lower():
                out.write(
                    "OS or Database OS specified in benchmark_config.json does not match the current environment. Skipping.\n"
                )
                return exit_with_code(0)

            # If the test is in the excludes list, we skip it
            if self.exclude != None and test.name in self.exclude:
                out.write(
                    "Test {name} has been added to the excludes list. Skipping.\n"
                    .format(name=test.name))
                return exit_with_code(0)

            out.write(
                "test.os.lower() = {os}  test.database_os.lower() = {dbos}\n".
                format(os=test.os.lower(), dbos=test.database_os.lower()))
            out.write("self.results['frameworks'] != None: {val}\n".format(
                val=str(self.results['frameworks'] != None)))
            out.write("test.name: {name}\n".format(name=str(test.name)))
            out.write("self.results['completed']: {completed}\n".format(
                completed=str(self.results['completed'])))
            if self.results['frameworks'] != None and test.name in self.results[
                    'completed']:
                out.write(
                    'Framework {name} found in latest saved data. Skipping.\n'.
                    format(name=str(test.name)))
                print 'WARNING: Test {test} exists in the results directory; this must be removed before running a new test.\n'.format(
                    test=str(test.name))
                return exit_with_code(1)
            out.flush()

            out.write(header("Beginning %s" % test.name, top='='))
            out.flush()

            ##########################
            # Start this test
            ##########################
            out.write(header("Starting %s" % test.name))
            out.flush()
            try:
                self.__cleanup_leftover_processes_before_test()

                if self.__is_port_bound(test.port):
                    time.sleep(60)

                if self.__is_port_bound(test.port):
                    # We gave it our all
                    self.__write_intermediate_results(
                        test.name, "port " + str(test.port) +
                        " is not available before start")
                    out.write(
                        header(
                            "Error: Port %s is not available, cannot start %s"
                            % (test.port, test.name)))
                    out.flush()
                    print "Error: Unable to recover port, cannot start test"
                    return exit_with_code(1)

                result, process = test.start(out)
                self.__process = process
                if result != 0:
                    self.__process.terminate()
                    time.sleep(5)
                    out.write("ERROR: Problem starting {name}\n".format(
                        name=test.name))
                    out.flush()
                    self.__write_intermediate_results(
                        test.name, "<setup.py>#start() returned non-zero")
                    return exit_with_code(1)

                logging.info(
                    "Sleeping %s seconds to ensure framework is ready" %
                    self.sleep)
                time.sleep(self.sleep)

                ##########################
                # Verify URLs
                ##########################
                if self.mode == "debug":
                    logging.info(
                        "Entering debug mode. Server has started. CTRL-c to stop."
                    )
                    while True:
                        time.sleep(1)
                else:
                    logging.info("Verifying framework URLs")
                    passed_verify = test.verify_urls(logDir)

                ##########################
                # Benchmark this test
                ##########################
                if self.mode == "benchmark":
                    logging.info("Benchmarking")
                    out.write(header("Benchmarking %s" % test.name))
                    out.flush()
                    test.benchmark(logDir)

                ##########################
                # Stop this test
                ##########################
                self.__stop_test(test, out)

                out.write(header("Stopped %s" % test.name))
                out.flush()

                ##########################################################
                # Remove contents of  /tmp folder
                ##########################################################
                try:
                    subprocess.check_call('sudo rm -rf /tmp/*',
                                          shell=True,
                                          stderr=out,
                                          stdout=out)
                except Exception:
                    out.write(header("Error: Could not empty /tmp"))

                ##########################################################
                # Remove apt sources to avoid pkg errors and collisions
                ##########################################################
                os.system("sudo rm -rf /etc/apt/sources.list.d/*")

                ##########################################################
                # Save results thus far into the latest results directory
                ##########################################################

                out.write(header("Saving results through %s" % test.name))
                out.flush()
                self.__write_intermediate_results(
                    test.name, time.strftime("%Y%m%d%H%M%S", time.localtime()))

                ##########################################################
                # Upload the results thus far to another server (optional)
                ##########################################################

                self.__upload_results()

                if self.mode == "verify" and not passed_verify:
                    print "Failed verify!"
                    return exit_with_code(1)
            except KeyboardInterrupt:
                self.__stop_test(test, out)
            except (OSError, IOError, subprocess.CalledProcessError) as e:
                self.__write_intermediate_results(
                    test.name, "<setup.py> raised an exception")
                out.write(header("Subprocess Error %s" % test.name))
                traceback.print_exc(file=out)
                out.flush()
                out.close()
                return exit_with_code(1)

            out.close()
            return exit_with_code(0)
Exemple #50
0
def main():
    header()
    make_money_slips()
Exemple #51
0
    dfsample = dfsample.reset_index(drop=True)
    dfsample = dfsample.reindex(np.random.permutation(dfsample.index),
                                copy=False)
    return dfsample


# NOTE : training_head is the first 1500000 rows of shuffled normalized data set used for training only
# NOTE : training_tail is the last 786885 rows of shuffled normalized data set used for validation only
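
# A hedged sketch of how the head/tail split described above could have been
# produced (the 1,500,000-row cut comes from the note; the source file name is
# an assumption borrowed from the other excerpts, not confirmed project code):
# df_all = pd.read_csv(ddir + 'training_shuffled_normed.csv', sep=';',
#                      names=header()).fillna('')
# df_all.iloc[:1500000].to_csv(ddir + 'training_head.csv', sep=';',
#                              index=False, header=False)
# df_all.iloc[1500000:].to_csv(ddir + 'training_tail.csv', sep=';',
#                              index=False, header=False)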

##########################
# building the training set
##########################

class_ratio = joblib.load(ddir + 'joblib/class_ratio')
df = pd.read_csv(ddir + 'training_head.csv', sep=';',
                 names=header()).fillna('')
for i in range(9):
    print i
    dfsample = training_sample_random(df, N=456, class_ratio=class_ratio)
    dfsample.to_csv(ddir + 'training_random.csv.' + str(i),
                    sep=';',
                    index=False,
                    header=False)

##########################
# building the validation set
##########################

class_ratio = joblib.load(ddir + 'joblib/class_ratio')
df = pd.read_csv(ddir + 'training_tail.csv', sep=';',
                 names=header()).fillna('')
Exemple #52
0
#####################
# create sample set
# from training set
#####################

# NOTE : USE class_ratio to estimate class balance and random_sampling.py to get training sample set
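
# A hedged sketch of how such a class-balance estimate could be built and saved
# with joblib for the sampling step (illustrative only; the real class_ratio
# object may hold raw counts rather than normalized frequencies, and the source
# file name is an assumption borrowed from the other excerpts):
# df_full = pd.read_csv(ddir + 'training_shuffled_normed.csv', sep=';',
#                       names=header()).fillna('')
# class_ratio = (df_full.Categorie3.value_counts() / float(len(df_full))).to_dict()
# joblib.dump(class_ratio, ddir + 'joblib/class_ratio')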

#######################
# training
# stage1 : Categorie1
# stage3 : Categorie3|Categorie1
#######################

dftrain = pd.read_csv(ddir + 'training_random.csv' + ext,
                      sep=';',
                      names=header()).fillna('')
dfvalid = pd.read_csv(ddir + 'validation_random.csv' + ext,
                      sep=';',
                      names=header()).fillna('')
dftest = pd.read_csv(ddir + 'test_normed.csv',
                     sep=';',
                     names=header(test=True)).fillna('')

add_txt(dftrain)
add_txt(dfvalid)
add_txt(dftest)

dftrain = dftrain[['Categorie3', 'Categorie1', 'txt']]
dfvalid = dfvalid[['Categorie3', 'Categorie1', 'txt']]
dftest = dftest[['Identifiant_Produit', 'txt']]
    def __run_tests(self, tests):
        if len(tests) == 0:
            return 0

        logging.debug("Start __run_tests.")
        logging.debug("__name__ = %s", __name__)

        error_happened = False
        if self.os.lower() == 'windows':
            logging.debug("Executing __run_tests on Windows")
            for test in tests:
                with open(self.current_benchmark,
                          'w') as benchmark_resume_file:
                    benchmark_resume_file.write(test.name)
                with self.quiet_out.enable():
                    if self.__run_test(test) != 0:
                        error_happened = True
        else:
            logging.debug("Executing __run_tests on Linux")

            # Setup a nice progressbar and ETA indicator
            widgets = [
                self.mode, ': ',
                progressbar.Percentage(), ' ',
                progressbar.Bar(), ' Rough ',
                progressbar.ETA()
            ]
            pbar = progressbar.ProgressBar(widgets=widgets,
                                           maxval=len(tests)).start()
            pbar_test = 0

            # These features do not work on Windows
            for test in tests:
                pbar.update(pbar_test)
                pbar_test = pbar_test + 1
                if __name__ == 'benchmark.benchmarker':
                    print header("Running Test: %s" % test.name)
                    with open(self.current_benchmark,
                              'w') as benchmark_resume_file:
                        benchmark_resume_file.write(test.name)
                    with self.quiet_out.enable():
                        test_process = Process(target=self.__run_test,
                                               name="Test Runner (%s)" %
                                               test.name,
                                               args=(test, ))
                        test_process.start()
                        test_process.join(self.run_test_timeout_seconds)
                    self.__load_results(
                    )  # Load intermediate result from child process
                    if (test_process.is_alive()):
                        logging.debug(
                            "Child process for {name} is still alive. Terminating."
                            .format(name=test.name))
                        self.__write_intermediate_results(
                            test.name, "__run_test timeout (=" +
                            str(self.run_test_timeout_seconds) + " seconds)")
                        test_process.terminate()
                        test_process.join()
                    if test_process.exitcode != 0:
                        error_happened = True
            pbar.finish()

        if os.path.isfile(self.current_benchmark):
            os.remove(self.current_benchmark)
        logging.debug("End __run_tests.")

        if error_happened:
            return 1
        return 0
Exemple #54
0
def main():
    clear()

    header()

    CashMachineConsole.call_operation()
Exemple #55
0
import random
from sklearn.externals import joblib
from utils import itocat1,itocat2,itocat3
from utils import cat1toi,cat2toi,cat3toi
from utils import cat3tocat2,cat3tocat1,cat2tocat1
from utils import cat1count,cat2count,cat3count
import sys
from os.path import isfile

import pandas as pd
# assumed: header() and add_txt() come from this project's utils module, as in
# the other excerpts; these imports are missing from the original excerpt
from utils import header, add_txt

ddir = '/home/ngaude/workspace/data/cdiscount/'

assert len(sys.argv) == 2  ##### usage guess.py $RESULTAT.CSV ####
rname  = sys.argv[1]
assert isfile(ddir+rname) ##### usage guess.py $RESULTAT.CSV ####


test_normed = pd.read_csv(ddir+'test_normed.csv',sep=';',names=header(True)).fillna('')
add_txt(test_normed)
test_num_word = map(lambda t:len(set(t.split())),test_normed.txt)

test_nn = pd.read_csv(ddir+'test_nn.csv',sep=';').fillna('')
test_nn['Marque'] = test_nn.Marque_nn
test_nn['Libelle'] = test_nn.Libelle_nn
test_nn['Description'] = test_nn.Description_nn
add_txt(test_nn)
nn_num_word = map(lambda t:len(set(t.split())),test_nn.txt)
test_nn.drop('Marque', axis=1, inplace=True)
test_nn.drop('Libelle', axis=1, inplace=True)
test_nn.drop('Description', axis=1, inplace=True)

best = pd.read_csv(ddir+rname,sep=';')
#best = pd.read_csv('proba.auto.merging.60.csv',sep=';')
Exemple #56
0
__credits__ = []
__version__ = "1.0"
__status__ = "Development"


os.environ['FOR_IGNORE_EXCEPTIONS'] = '1'
os.environ['FOR_DISABLE_CONSOLE_CTRL_HANDLER'] = '1'

if __name__ == "__main__":
    import sys

    while True:
        try:
            while True:
                system('cls')  # clear the screen
                header()  # show the system name, the developers' names and the FATEC Ourinhos phone number
                # choose which problem to run
                option = input(
                    ' 1. Determine how many hectares of each crop should be planted to maximize profit.\n'
                    ' 2. Determine how many hectares of each crop should be planted and how much of each '
                    'livestock should be kept to maximize net revenue.\n'
                    ' 3. Maximize the present value of the crops and choose which ones to grow.\n\n'
                    ' Which problem should be run? ')
                system('cls')  # clear the screen
                header()  # show the system name, the developers' names and the FATEC Ourinhos phone number
                if option == '1':
                    Problem1()
                    break
                elif option == '2':
                    Problem2()
Exemple #57
0
def iterative_mc_lc(arr, which="mc"):
    # def line reconstructed from the calls in __main__ below; keeps only rows
    # whose bit in each successive column matches the most (or least) common bit
    assert which in ("mc", "lc"), "Only most or least common"

    data = arr.copy()
    for i in range(arr.shape[1]):
        mc = most_common(data[:, i])
        if which == "lc":
            mc = 1 - mc
        data = data[data[:, i] == mc]
        if data.shape[0] == 1:
            break
    return data[0]


if __name__ == "__main__":
    data = read_array("data/03/input.txt")

    print(header("Part 1"))
    mc = most_common(data)
    lc = 1 - mc
    print("γ rate  =", mc, bits2int(mc))
    print("ε rate  =", lc, bits2int(lc))
    print("Product =", bits2int(mc) * bits2int(lc))

    print(header("Part 2"))
    o2 = iterative_mc_lc(data)
    co2 = iterative_mc_lc(data, which="lc")
    print("O2 rating  =", o2, bits2int(o2))
    print("CO2 rating =", co2, bits2int(co2))
    print("Product    =", bits2int(o2) * bits2int(co2))
    print()
Exemple #58
0
    def __run_test(self, test):

        # Used to capture return values
        def exit_with_code(code):
            if self.os.lower() == 'windows':
                return code
            else:
                sys.exit(code)

        logDir = os.path.join(self.full_results_directory(), test.name.lower())
        try:
            os.makedirs(logDir)
        except Exception:
            pass
        with open(os.path.join(logDir, 'out.txt'), 'w') as out:

            if test.os.lower() != self.os.lower() or test.database_os.lower() != self.database_os.lower():
                out.write("OS or Database OS specified in benchmark_config.json does not match the current environment. Skipping.\n")
                return exit_with_code(0)

            # If the test is in the excludes list, we skip it
            if self.exclude != None and test.name in self.exclude:
                out.write("Test {name} has been added to the excludes list. Skipping.\n".format(name=test.name))
                return exit_with_code(0)

            out.write("test.os.lower() = {os}  test.database_os.lower() = {dbos}\n".format(os=test.os.lower(),dbos=test.database_os.lower()))
            out.write("self.results['frameworks'] != None: {val}\n".format(val=str(self.results['frameworks'] != None)))
            out.write("test.name: {name}\n".format(name=str(test.name)))
            out.write("self.results['completed']: {completed}\n".format(completed=str(self.results['completed'])))
            if self.results['frameworks'] != None and test.name in self.results['completed']:
                out.write('Framework {name} found in latest saved data. Skipping.\n'.format(name=str(test.name)))
                print 'WARNING: Test {test} exists in the results directory; this must be removed before running a new test.\n'.format(test=str(test.name))
                return exit_with_code(1)
            out.flush()

            out.write(header("Beginning %s" % test.name, top='='))
            out.flush()

            ##########################
            # Start this test
            ##########################
            out.write(header("Starting %s" % test.name))
            out.flush()
            try:
                if test.requires_database():
                    p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, stdout=out, stderr=out, shell=True)
                    p.communicate("""
            sudo restart mysql
            sudo restart mongod
            sudo service postgresql restart
            sudo service cassandra restart
            /opt/elasticsearch/elasticsearch restart
          """)
                    time.sleep(10)

                    st = verify_database_connections([
                        ("mysql", self.database_host, 3306),
                        ("mongodb", self.database_host, 27017),
                        ("postgresql", self.database_host, 5432),
                        ("cassandra", self.database_host, 9160),
                        ("elasticsearch", self.database_host, 9200)
                    ])
                    print "database connection test results:\n" + "\n".join(st[1])

                self.__cleanup_leftover_processes_before_test();

                if self.__is_port_bound(test.port):
                    # We gave it our all
                    self.__write_intermediate_results(test.name, "port " + str(test.port) + " is not available before start")
                    out.write(header("Error: Port %s is not available, cannot start %s" % (test.port, test.name)))
                    out.flush()
                    print "Error: Unable to recover port, cannot start test"
                    return exit_with_code(1)

                result, process = test.start(out)
                if result != 0:
                    self.__stop_test(out, process)
                    time.sleep(5)
                    out.write( "ERROR: Problem starting {name}\n".format(name=test.name) )
                    out.flush()
                    self.__write_intermediate_results(test.name,"<setup.py>#start() returned non-zero")
                    return exit_with_code(1)

                logging.info("Sleeping %s seconds to ensure framework is ready" % self.sleep)
                time.sleep(self.sleep)

                ##########################
                # Verify URLs
                ##########################
                logging.info("Verifying framework URLs")
                passed_verify = test.verify_urls(logDir)

                ##########################
                # Nuke /tmp
                ##########################
                try:
                    subprocess.check_call('sudo rm -rf /tmp/*', shell=True, stderr=out, stdout=out)
                except Exception:
                    out.write(header("Error: Could not empty /tmp"))

                ##########################
                # Benchmark this test
                ##########################
                if self.mode == "benchmark":
                    logging.info("Benchmarking")
                    out.write(header("Benchmarking %s" % test.name))
                    out.flush()
                    test.benchmark(logDir)

                ##########################
                # Stop this test
                ##########################
                out.write(header("Stopping %s" % test.name))
                out.flush()
                self.__stop_test(out, process)
                out.flush()
                time.sleep(5)

                if self.__is_port_bound(test.port):
                    # This can happen sometimes - let's try again
                    self.__stop_test(out, process)
                    out.flush()
                    time.sleep(5)
                    if self.__is_port_bound(test.port):
                        # We gave it our all
                        self.__write_intermediate_results(test.name, "port " + str(test.port) + " was not released by stop")
                        out.write(header("Error: Port %s was not released by stop %s" % (test.port, test.name)))
                        out.flush()
                        return exit_with_code(1)

                out.write(header("Stopped %s" % test.name))
                out.flush()

                ##########################################################
                # Remove contents of  /tmp folder
                ##########################################################
                if self.clear_tmp:
                    try:
                        filelist = [ f for f in os.listdir("/tmp") ]
                        for f in filelist:
                            try:
                                os.remove("/tmp/" + f)
                            except OSError as err:
                                print "Failed to remove " + str(f) + " from /tmp directory: " + str(err)
                    except OSError:
                        print "Failed to remove contents of /tmp directory."

                ##########################################################
                # Save results thus far into the latest results directory
                ##########################################################

                out.write(header("Saving results through %s" % test.name))
                out.flush()
                self.__write_intermediate_results(test.name,time.strftime("%Y%m%d%H%M%S", time.localtime()))

                if self.mode == "verify" and not passed_verify:
                    print "Failed verify!"
                    return exit_with_code(1)
            except (OSError, IOError, subprocess.CalledProcessError) as e:
                self.__write_intermediate_results(test.name,"<setup.py> raised an exception")
                out.write(header("Subprocess Error %s" % test.name))
                traceback.print_exc(file=out)
                out.flush()
                try:
                    self.__stop_test(out, process)
                except (subprocess.CalledProcessError) as e:
                    self.__write_intermediate_results(test.name,"<setup.py>#stop() raised an error")
                    out.write(header("Subprocess Error: Test .stop() raised exception %s" % test.name))
                    traceback.print_exc(file=out)
                    out.flush()
                out.close()
                return exit_with_code(1)
            # TODO - subprocess should not catch this exception!
            # Parent process should catch it and cleanup/exit
            except (KeyboardInterrupt) as e:
                self.__stop_test(out, process)
                out.write(header("Cleaning up..."))
                out.flush()
                self.__finish()
                sys.exit(1)

            out.close()
            return exit_with_code(0)
Exemple #59
0
    def verify_type(test_type):
      verificationPath = os.path.join(logPath, test_type)
      try:
        os.makedirs(verificationPath)
      except OSError:
        pass
      with open(os.path.join(verificationPath, 'verification.txt'), 'w') as verification:
        test = self.runTests[test_type]
        test.setup_out(verification)
        verification.write(header("VERIFYING %s" % test_type.upper()))

        base_url = "http://%s:%s" % (self.benchmarker.server_host, self.port)

        try:
          # Verifies headers from the server. This check is made from the
          # App Server using Pythons requests module. Will do a second check from
          # the client to make sure the server isn't only accepting connections
          # from localhost on a multi-machine setup.
          results = test.verify(base_url)

          # Now verify that the url is reachable from the client machine, unless
          # we're already failing
          if not any(result == 'fail' for (result, reason, url) in results):
            p = subprocess.call(["ssh", "TFB-client", "curl -sSf %s" % base_url + test.get_url()], shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            if p != 0:  # value comparison; 'is not 0' only worked via CPython int caching
              results = [('fail', "Server did not respond to request from client machine.", base_url)]
              logging.warning("""This error usually means your server is only accepting
                requests from localhost.""")
        except ConnectionError as e:
          results = [('fail',"Server did not respond to request", base_url)]
          logging.warning("Verifying test %s for %s caused an exception: %s", test_type, self.name, e)
        except Exception as e:
          results = [('fail',"""Caused Exception in TFB
            This almost certainly means your return value is incorrect,
            but also that you have found a bug. Please submit an issue
            including this message: %s\n%s""" % (e, traceback.format_exc()),
            base_url)]
          logging.warning("Verifying test %s for %s caused an exception: %s", test_type, self.name, e)
          traceback.format_exc()

        test.failed = any(result == 'fail' for (result, reason, url) in results)
        test.warned = any(result == 'warn' for (result, reason, url) in results)
        test.passed = all(result == 'pass' for (result, reason, url) in results)

        def output_result(result, reason, url):
          specific_rules_url = "http://frameworkbenchmarks.readthedocs.org/en/latest/Project-Information/Framework-Tests/#specific-test-requirements"
          color = Fore.GREEN
          if result.upper() == "WARN":
            color = Fore.YELLOW
          elif result.upper() == "FAIL":
            color = Fore.RED

          verification.write(("   " + color + "%s" + Style.RESET_ALL + " for %s\n") % (result.upper(), url))
          print ("   " + color + "%s" + Style.RESET_ALL + " for %s\n") % (result.upper(), url)
          if reason is not None and len(reason) != 0:
            for line in reason.splitlines():
              verification.write("     " + line + '\n')
              print "     " + line
            if not test.passed:
              verification.write("     See %s\n" % specific_rules_url)
              print "     See %s\n" % specific_rules_url

        [output_result(r1,r2,url) for (r1, r2, url) in results]

        if test.failed:
          self.benchmarker.report_verify_results(self, test_type, 'fail')
        elif test.warned:
          self.benchmarker.report_verify_results(self, test_type, 'warn')
        elif test.passed:
          self.benchmarker.report_verify_results(self, test_type, 'pass')
        else:
          raise Exception("Unknown error - test did not pass,warn,or fail")

        verification.flush()
Exemple #60
0
import pathlib
import pandas as pd
# assumed: the Dash component imports below are missing from this excerpt
# (dcc.Location and html.Div are used in the app layout further down)
import dash_core_components as dcc
import dash_html_components as html
from app import app, server
from utils import colors, fonts, header
from pages import batsmenGraphs

PATH = pathlib.Path(__file__)
DATA_PATH = PATH.joinpath("../data").resolve()

selected = ['V Kohli', 'SPD Smith']
df_main = pd.read_csv(DATA_PATH.joinpath("innings.csv"))
available_players = df_main['Name'].unique()

app.layout = html.Div([
    dcc.Location(id='url', refresh=False),
    html.Div(header()),
    html.Div(id='batsmenGraphs-head',
             children=[
                 html.H3(
                     ['Batsmen Compared'],
                     style={
                         'textAlign': 'center',
                         'color': colors['title'],
                         'fontFamily': fonts['title'],
                         'position': 'relative',
                         'top': '-100px',
                         'left': '20%'
                     }),
                 html.Div(
                     [
                         html.Div(