def isNewGenerationByOS(machine, project_name):
    os = (machine['sourceProperties'].get('os') or 'none').lower()
    if os.startswith('microsoft'):
        split_os = os.split(' ')
        if len(split_os) > 3:
            # Microsoft Windows Server 2003
            # Microsoft Windows Server 2008 R1
            # Microsoft Windows Server 2012 R1
            ws_version = split_os[3]
            if ws_version.isdigit():
                ws_version = int(ws_version)
                if ws_version > 2003:
                    if ws_version in (2008, 2012):
                        # 2008 and 2012 only count as new generation in their R2 variants
                        if len(split_os) > 4 and split_os[4] == 'r2':
                            return True
                    else:
                        return True
        return False
    elif os.startswith('linux'):
        # Linux kernels above major version 2, e.g.:
        # Linux version 3.0.101-63-default (geeko@buildhost) (gcc version 4.3.4 [gcc-4_3-branch revision 152973] (SUSE Linux) ) #1 SMP Tue Jun 23 16:02:31 UTC 2015 (4b89d0c)
        split_os = os.split(' ')
        if len(split_os) >= 3 and split_os[2]:
            if split_os[2][0].isdigit() and int(split_os[2][0]) > 2:
                return True
    print("Project : {} , HostName : {} has an unidentified OS".format(project_name, machine['sourceProperties']['name']))
    return False
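A quick sanity check of the rules above; the machine dict shape is inferred from the function body and the values are hypothetical:

machine = {'sourceProperties': {'os': 'Microsoft Windows Server 2012 R2', 'name': 'host-1'}}
print(isNewGenerationByOS(machine, 'demo-project'))  # True: 2012 counts only in its R2 variant
machine['sourceProperties']['os'] = 'Microsoft Windows Server 2008'
print(isNewGenerationByOS(machine, 'demo-project'))  # False: 2008 without R2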
Example #2
def default_os(
    default_architecture,
    default_partitiontable,
    default_pxetemplate,
    os=None,
):
    if os is None:
        os = (entities.OperatingSystem().search(
            query={
                'search':
                'name="RedHat" AND (major="{0}" OR major="{1}")'.format(
                    RHEL_6_MAJOR_VERSION, RHEL_7_MAJOR_VERSION)
            })[0].read())
    else:
        major = os.split(' ')[1].split('.')[0]
        minor = os.split(' ')[1].split('.')[1]
        os = (entities.OperatingSystem().search(
            query={
                'search':
                f'family="Redhat" AND major="{major}" AND minor="{minor}"'
            })[0].read())
    os.architecture.append(default_architecture)
    os.ptable.append(default_partitiontable)
    os.provisioning_template.append(default_pxetemplate)
    os.update(['architecture', 'ptable', 'provisioning_template'])
    os = entities.OperatingSystem(id=os.id).read()
    return os
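The major/minor parsing in the else branch above assumes an input like "RedHat 7.9" (the format is inferred from the split calls); a minimal trace with a hypothetical string:

os_str = 'RedHat 7.9'
major = os_str.split(' ')[1].split('.')[0]  # '7'
minor = os_str.split(' ')[1].split('.')[1]  # '9'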
Example #3
    def ipv4_hostname_output(self):

        ipv4_host_title = '| {0:16} | {1:16} | {2:18} | {3:10} | {4:18} | {5:1}'.format('Host', 'IPv4', 'MAC', 'Domain', 'Server Type', 'Windows OS (Server Fingerprint)')

        print '-' * blessings.Terminal().width
        print self.color(ipv4_host_title, char='')
        print '-' * blessings.Terminal().width

        server_type = ''

        for host in sorted(self.hosts):

            ipv4 = self.hosts[host]['ipv4']
            mac = self.hosts[host]['mac']

            if host is not None and '*' not in host:

                if 'fqdn' in self.hosts[host].keys():
                    print self.hosts[host]['fqdn']

                os = self.hosts[host]['os']
                nt_version = None
                os_version = os
                serverlist = {'domain_controller': 'DC', 'backup_controller': 'Backup DC', 'sql_server': 'SQL', 'print': 'Printer'}
                host_comment = None

                if 'comment' in self.hosts[host].keys():
                    host_comment = self.hosts[host]['comment']

                if os is not None and not os.startswith('Microsoft'):
                    nt_version = os.split('(')[1].split(')')[0].strip()
                    os_version = os.split('(')[0].strip()

                if host_comment and host_comment[0] != '\x00':
                    os_version += ' ({})'.format(host_comment.capitalize())

                domain = self.hosts[host]['domain']
                #notes = self.hosts[host]['notes']

                if 'server_keys' in self.hosts[host].keys():

                    servers = []
                    server_types = self.hosts[host]['server_keys']

                    for server in server_types:

                        if server_types[server] == '1' and server in serverlist.keys():
                            servers.append(serverlist[server])

                    ipv4_host_output = '| {0:16} | {1:16} | {2:18} | {3:10} | {4:18} | {5:1}'.format(host.upper(), ipv4, mac, domain, ','.join(servers).strip(), os_version)

                    print self.color(ipv4_host_output, char='')

        print '-' * blessings.Terminal().width
        print ''
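The nt_version/os_version split above expects an SMB-style banner with the NT version in parentheses; a hypothetical example of what it extracts:

banner = 'Windows 7 Professional 7601 Service Pack 1 (Windows 7 Professional 6.1)'
nt_version = banner.split('(')[1].split(')')[0].strip()  # 'Windows 7 Professional 6.1'
os_version = banner.split('(')[0].strip()                # 'Windows 7 Professional 7601 Service Pack 1'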
Example #4
    def get_win8_targets(self):

        for host in self.hosts.keys():

            os = self.hosts[host]['os']

            if os is not None and not os.startswith('Microsoft'):
                nt_version = os.split('(')[1].split(')')[0].strip()
                os_version = os.split('(')[0].strip()

                if 'Win 8' in os_version:
                    print host
Example #5
    def get_latest_os_version(os):
        latest_os_version, current_os_version, status = '', '', ''
        if 'Flatcar' in os:
            ver = requests.get(
                "https://stable.release.flatcar-linux.net/amd64-usr/current/version.txt"
            )
            latest_os_version = re.findall('(FLATCAR_VERSION=)(.+)', ver.text)
            current_os_version = os.split()[5]

            if version.parse(str(current_os_version)) < version.parse(
                    str(latest_os_version[0][1])):
                print(Output.YELLOW + "[WARNING] " + Output.RESET + \
                "Cluster nodes are not running on latest {}{}"\
                .format(latest_os_version[0][0], latest_os_version[0][1]))
                status = 'outdated'
                latest_os_version = latest_os_version[0][1]
            else:
                status = 'latest'
                latest_os_version = latest_os_version[0][1]

        elif 'CoreOS' in os:
            print(Output.YELLOW + "[WARNING] " + Output.RESET + \
            "Cluster nodes are running on CoreOS, which is DEPRECATED: https://coreos.com/os/eol/. " + \
            "PLEASE CONSIDER CHANGING THE DEPRECATED OS!")
            latest_os_version = 'EOL'
            status = 'EOL'

        return version_check.append(
            ['OS', latest_os_version, current_os_version, status])
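What the re.findall call above yields on a hypothetical version.txt body; only the line with a literal "FLATCAR_VERSION=" matches:

import re
text = 'FLATCAR_VERSION=3510.2.1\nFLATCAR_VERSION_ID=3510.2.1'
m = re.findall('(FLATCAR_VERSION=)(.+)', text)
# [('FLATCAR_VERSION=', '3510.2.1')] -> m[0][1] is the version string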
Example #6
def autocreate_model(basexml, weather, randomxml='random.xml', schedule=None):
    randompath = os.path.split(os.path.abspath(randomxml))[0]
    found = False
    while not found:
        random_model(basexml, randomxml)
        randomxmlstr = None
        with open(randomxml) as randomxmlfile:
            randomxmlstr = randomxmlfile.read()
        idf = idfxml.xml_to_idf(randomxmlstr)
        idfname = '{}.idf'.format(randomxml)
        with open(idfname, 'w') as idffile:
            idffile.write(str(idf))
        if schedule is not None:
            schedule = [(schedule, os.path.split(schedule)[1])]
        runner = eplus.EnergyPlus()
        results = runner.run(idfname,
                             weather,
                             supplemental_files=schedule,
                             working_directory=randompath)
        if results is not None:
            found = True
            try:
                os.remove(idfname)
            except OSError:
                pass
            return results
Example #7
def recursive_move(src, dest, pattern):
    '''Recursively moves files by extension
    Args:
        src (str): String source directory
        dest (str): String destination directory
        pattern: compiled regex (see example)
    Example:
        pattern = re.compile(r'.*\.(mov|MOV|avi|mpg)$')
    '''
    import directories
    import os

    filelist = file_list(src, pattern)

    for f in filelist:
        d, filename = os.path.split(f)
        child_dir = os.path.split(d)[1]
        dest_dir = os.path.join(dest, child_dir)

        # Create dest/child_dir if it does not exist
        directories.mkdir_p(dest_dir)
        # Move the file into dest/child_dir
        os.rename(f, os.path.join(dest_dir, filename))
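Hypothetical usage of the function above (file_list and directories are helpers from the original project; the paths are illustrative):

import re
pattern = re.compile(r'.*\.(mov|MOV|avi|mpg)$')
recursive_move('/data/incoming', '/data/videos', pattern)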
Example #8
def handle_options(cfg, options):
    # hack alert:
    if options.dumptime:
        cfg['dump-req-times'] = True
    if options.dumpatom:
        cfg['dump-atom-reqs'] = True
    if options.stampMsgs:
        cfg['stamp-messages'] = True

    # Bonus hack for -C
    if options.extraconfig:
        # 'os' here is just an option string; it shadows any imported os module
        for os in options.extraconfig:
            if ':' not in os:
                cfg[os] = True
            else:
                on, ov = os.split(':', 1)
                cfg[on] = ov
    # Ditto for -D, which is at least simpler.
    if options.rmconfig:
        for os in options.rmconfig:
            cfg.rm(os)
    # Forcefully disable caching.
    if options.noCache:
        cfg.rm("cachedir")
        cfg.rm("render-cache")
        cfg.rm("bfc-cache-ttl")
        cfg.rm("imc-cache-entries")
    if options.noBruteCache:
        cfg.rm("bfc-cache-ttl")
        cfg.rm("imc-cache-entries")
    # We rely on being able to rerun checkGoodConfig(), which may
    # take some work in the underlying system.
    cfg.checkGoodConfig()
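How the '-C key:value' parsing above behaves; maxsplit=1 keeps any further colons in the value (the entry is a made-up example):

entry = 'cachedir:/var/cache:app'
on, ov = entry.split(':', 1)  # on == 'cachedir', ov == '/var/cache:app'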
Example #11
def run_test(ups_path,WITH_MPI=False,NUM_PROCS=1,RESTART=False,DAMPING_OFF_NEW_END_TIME=False,
             POST_PROC_ONLY=False):
  ''' '''
  print '\nRunning test:\t',os.path.split(ups_path)[1]

  #Determine root path
  root_path = os.path.split(os.path.abspath(ups_path))[0]

  #Determine uda path
  print "Root path = ", root_path
  print "Ups path = ", ups_path
  F_ups = open(ups_path,"r")
  ups_lines = F_ups.read()
  uda_path = root_path + '/' + ups_lines.split('<filebase>')[1].split('</filebase>')[0].strip()
  F_ups.close()    
  print "UDA path = ", uda_path
  
  #Change current working directory to root path
  os.chdir(root_path)
  #Open runlog
  F_log = open(root_path+'/TEST_RUNLOG_'+os.path.split(ups_path)[1],"w")
  #Construct the argument list for subprocess to use.
  if not(WITH_MPI) or int(NUM_PROCS)<=1:
    args = [uintah_exe,os.path.split(ups_path)[1]]
  else:
    args = ['mpirun','-np',str(int(NUM_PROCS)), uintah_exe,'-mpi',os.path.split(ups_path)[1]]

  if POST_PROC_ONLY:
    uda_path = uda_path+'.000'
  else:
    #Run the test and wait for it to complete
    tmp = sub_proc.Popen(args,stdout=F_log,stderr=sub_proc.PIPE)
    dummy = tmp.wait()
    F_log.close()
    #If the test calls for a restart
    if RESTART:
      #If damping is turned off, run to a new end time
      if DAMPING_OFF_NEW_END_TIME:
        #Setup the restart by setting damping to zero and modifying the end time
        print 'Setting <artificial_damping_coeff> to zero and restarting with new end time of ',format(NEW_END_TIME,'1.4e')
        setup_restart(uda_path,DAMPING_OFF_NEW_END_TIME)
        print 'Done.\nRestarting...'
        #Open new runlog
        F_log = open(root_path+'/TEST_RUNLOG_RESTART_'+os.path.split(ups_path)[1],"w")
        #Construct the argument list
        if not(WITH_MPI) or NUM_PROCS<=1:
          args = [uintah_exe,'-restart','-move',uda_path+'.000']
        else:
          args = ['mpirun','-np',str(int(NUM_PROCS)), uintah_exe,'-mpi','-restart','-move',uda_path+'.000']
        #Run the test and wait for it to complete
        tmp = sub_proc.Popen(args,stdout=F_log,stderr=sub_proc.PIPE)
        dummy = tmp.wait()
        F_log.close()
        uda_path = uda_path+'.001'
    else:
      uda_path = uda_path+'.000'

  print('Test done.')  
  return uda_path
Example #12
def run_test(ups_path,WITH_MPI=False,NUM_PROCS=1,RESTART=False,DAMPING_OFF_NEW_END_TIME=False):
  ''' '''
  print '\nRunning test:\t',os.path.split(ups_path)[1]

  #Determine root path
  root_path = os.path.split(os.path.abspath(ups_path))[0]

  #Determine uda path
  print "Root path = ", root_path
  print "Ups path", ups_path
  F_ups = open(ups_path,"r")
  ups_lines = F_ups.read()
  uda_path = root_path + "/" + ups_lines.split('<filebase>')[1].split('</filebase>')[0].strip()
  F_ups.close()    
  print "UDA path", uda_path

  #Change current working directory to root path
  os.chdir(root_path)

  if RUN_TESTS:
    #Open runlog
    F_log = open(root_path+'/TEST_RUNLOG_'+os.path.split(ups_path)[1],"w")
    #Construct the argument list for subprocess to use.
    if not(WITH_MPI) or int(NUM_PROCS)<=1:
      args = [uintah_exe, os.path.split(ups_path)[1]]
    else:
      args = ['mpirun','-np',str(int(NUM_PROCS)), uintah_exe,'-mpi',os.path.split(ups_path)[1]]
    #Run the test and wait for it to complete
    print "Arguments = ", args
    print "F_log = ", F_log
    tmp = sub_proc.Popen(args,stdout=F_log,stderr=sub_proc.PIPE)
    dummy = tmp.wait()
    F_log.close()
    #If the test calls for a restart
    if RESTART:
      #If damping is turned off, run to a new end time
      if DAMPING_OFF_NEW_END_TIME:
        #Setup the restart by setting damping to zero and modifying the end time
        print 'Setting <artificial_damping_coeff> to zero and restarting with new end time of ',format(NEW_END_TIME,'1.4e')
        setup_restart(uda_path,DAMPING_OFF_NEW_END_TIME)
        print 'Done.\nRestarting...'
        #Open new runlog
        F_log = open(root_path+'/TEST_RUNLOG_RESTART_'+os.path.split(ups_path)[1],"w")
        #Construct the argument list
        if not(WITH_MPI) or NUM_PROCS<=1:
          args = [ uintah_exe,'-restart','-move',uda_path+'.000']
        else:
          args = ['mpirun','-np',str(int(NUM_PROCS)), uintah_exe,'-mpi','-restart','-move',uda_path+'.000']
        #Run the test and wait for it to complete
        tmp = sub_proc.Popen(args,stdout=F_log,stderr=sub_proc.PIPE)
        dummy = tmp.wait()
        F_log.close()
  if RESTART:
    uda_path = uda_path+'.001'
  else:
    uda_path = uda_path+'.000'
  print('Test done.')
  return uda_path
Example #13
def get_os(self):
    os = self.info.distribution_version.lower()
    to_be_replaced = ['\n', ' ', 'gnu/linux']
    for substring in to_be_replaced:
        if substring in os:
            os = os.replace(substring, '')
    if self.info.deliverable_type == "dmg":
        major_version = os.split('.')
        os = major_version[0] + '.' + major_version[1]
    return os
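The cleanup loop above, traced on a hypothetical distribution string:

s = 'ubuntu 16.04 gnu/linux\n'
for junk in ['\n', ' ', 'gnu/linux']:
    s = s.replace(junk, '')
print(s)  # 'ubuntu16.04'; for a "dmg" build, '10.12.6' would likewise be trimmed to '10.12'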
Example #14
def assemble_suffixed_filename(filename, suffix=0):
    '''
    Split the document filename, attach the suffix to the name part, then
    reattach the extension
    '''

    if suffix:
        name, extension = os.path.splitext(filename)
        # splitext keeps the leading os.extsep in 'extension'
        return SUFFIX_SEPARATOR.join([name, unicode(suffix)]) + extension
    else:
        return filename
Example #15
def get_os(self, i):
    b = i.split('),')
    for part in b:
        if ',' in part or 'or' in part:
            pass
        else:
            if '(' in part:
                os = part.split('(')[0].strip()
                if 'Aggressive OS guesses' in os:
                    os = os.split(':')[-1]
                return os
Example #16
def __login(self):
	conf = configparser.ConfigParser()
	if os.path.exists(self.__PATH):
		conf.read(self.__PATH)
		sa = conf.get(self.db, 'accountname')
		pw = conf.get(self.db, 'password')
		host = conf.get(self.db, 'ip')
		port = conf.get(self.db, 'port')
		dbname = conf.get(self.db, 'dbname')
		return sa, pw, host, port, dbname
	else:
		raise IOError('NotFoundFile:{}'.format(os.path.split(self.__PATH)[-1]))
Example #18
def ok_dep(dep):
    os = dep.get('os')
    if os is not None:
        oses = {x.strip().lower() for x in os.split(',')}
        if OS_NAME not in oses:
            return False
    py = dep.get('python')
    if py is not None:
        q = ok_dep.major_version
        if isinstance(py, str):
            return q < int(py[1:])
        return q >= py
    return True
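The comma-separated 'os' field above normalizes like this (the field value is hypothetical):

os_field = 'Linux, Darwin '
oses = {x.strip().lower() for x in os_field.split(',')}  # {'linux', 'darwin'}
# the dependency is rejected when OS_NAME is not in this set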
Example #19
    def get_latest_os_version(os, logger):
        latest_os_version, current_os_version, status = [''] * 3
        if 'Flatcar' in os:
            session = requests.Session()
            ver = session.get(
                "https://stable.release.flatcar-linux.net/amd64-usr/current/version.txt"
            )
            session.close()
            latest_os_version = re.findall('(FLATCAR_VERSION=)(.+)', ver.text)
            current_os_version = os.split()[5]

            if version.parse(str(current_os_version)) < version.parse(
                    str(latest_os_version[0][1])):
                logger.warning("Cluster nodes are not running on latest {}{}"\
                        .format(latest_os_version[0][0], latest_os_version[0][1]))
                status = Nodes.outdated
                latest_os_version = latest_os_version[0][1]
            else:
                status = Nodes.latest
                latest_os_version = latest_os_version[0][1]

        elif 'CoreOS' in os:
            logger.warning("Cluster nodes are running on CoreOS, which is DEPRECATED: https://coreos.com/os/eol/. " + \
            "PLEASE CONSIDER CHANGING THE DEPRECATED OS!")
            latest_os_version = 'EOL'
            status = 'EOL'
        elif 'Ubuntu' in os:
            current_os_version = re.sub('[^0-9.]', '', os)
            session = requests.Session()
            ver = session.get("https://api.launchpad.net/devel/ubuntu/series")
            session.close()
            for x in ver.json()['entries']:
                if 'Current Stable Release' in x['status']:
                    latest_os_version = x['version']
                    if version.parse(str(current_os_version)) < version.parse(
                            str(latest_os_version)):
                        logger.warning(
                            "Cluster nodes are not running on latest Ubuntu version."
                        )
                        status = Nodes.outdated
                    else:
                        status = Nodes.latest
        else:
            latest_os_version, current_os_version, status = [
                'OS not supported'
            ] * 3

        return version_check.append(
            ['OS', latest_os_version, current_os_version, status])
Example #20
def getParams():
	switches, files= parse_options(sys.argv[1:],default_options.copy())
	if switches.has_key('f'):
		# 'os' here holds the option file's contents; it shadows the os module
		os=open(switches['f']).read()
		s2, f2 = parse_options(os.split(),switches)
		f2=[f for f in f2 if f]
		switches.update(s2)
		files.extend(f2)
	for o in switches.keys():
		if option_dtypes.has_key(o):
			switches[o]=option_dtypes[o](switches[o])
	if 't' in switches.keys():
		testme(files, switches)
		sys.exit()
	return (files, switches)
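Note that str.split() with no argument splits on any run of whitespace, so the option file read into 'os' above becomes a flat argv-style token list; a hypothetical file body:

print('--n 5\n-t\nresults.txt'.split())  # ['--n', '5', '-t', 'results.txt']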
Example #21
def function_wrapper(obj, omesh_path=None, use_global_frame=True):
    if not omesh_path:
        omeshfolder_path = os.path.join("./meshes/", obj.Label)
        omesh_path = os.path.join(omeshfolder_path, "full")
    else:
        omeshfolder_path = os.path.split(omesh_path)[0]
    if not os.path.exists(omeshfolder_path):
        os.makedirs(omeshfolder_path)
    prev_pl = obj.Placement
    if not use_global_frame:
        pl = getGlobalPlacement(obj)
        obj.Placement = pl.inverse().multiply(obj.Placement)
    res = func(obj, omesh_path)
    obj.Placement = prev_pl
    return res
Example #22
def arch_update(d, prefix, gcc_version):
    arch = d.get(prefix+'_ARCH', True)
    gccspec = arch_gccspec(arch, gcc_version)
    (cpu, vendor, os) = arch_split(arch)
    d[prefix+'_CPU'] = cpu
    d[prefix+'_VENDOR'] = vendor
    d[prefix+'_OS'] = os
    ost = os.split('-',1)
    if len(ost) > 1:
        d[prefix+'_BASEOS'] = ost[0]
    else:
        d[prefix+'_BASEOS'] = ""
    for spec in gccspec:
        if spec in ("abi flags"):
            continue
        d[prefix+'_'+spec.upper()] = gccspec[spec]
    return
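Assuming arch_split returns the pieces of a GNU triplet, the BASEOS computation above behaves like this on a hypothetical OS field:

os_part = 'linux-gnu'        # e.g. from 'x86_64-pc-linux-gnu'
ost = os_part.split('-', 1)  # ['linux', 'gnu'] -> BASEOS becomes 'linux'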
Example #24
	def __download_album(self, audio_dl_uri):

		fullpath = urlparse.urlparse(audio_dl_uri).path
		basename = os.path.split(fullpath)[1]
		destpath = os.path.join(magnatune_in_progress_dir, basename)

		shell = self.get_property('shell')
		manager = shell.get_player().get_property('ui-manager')
		manager.get_action("/MagnatuneSourceViewPopup/MagnatuneCancelDownload").set_sensitive(True)
		self.__downloading = True
		self.cancelled = False

		self.__downloads[audio_dl_uri] = 0

		# no way to resume downloads, sadly
		out = open(destpath, 'w')

		dl = rb.ChunkLoader()
		dl.get_url_chunks(audio_dl_uri, 4*1024, True, self.__download_album_chunk, (audio_dl_uri, destpath, out))
Example #25
def import_user_module(user_dir: str, no_print: bool = False):
    """Given a user dir, this function imports it as a module.

    This user_module is expected to have an __init__.py at its root.
    You can use import_files to import your python files easily in
    __init__.py

    Args:
        user_dir (str): directory which has to be imported
        no_print (bool): This function won't print anything if set to true
    """
    if user_dir:
        user_dir = get_absolute_path(user_dir)
        module_parent, module_name = os.path.split(user_dir)

        if module_name not in sys.modules:
            sys.path.insert(0, module_parent)
            if not no_print:
                print(f"Importing user_dir from {user_dir}")
            importlib.import_module(module_name)
            sys.path.pop(0)
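A hypothetical call; the directory must contain an __init__.py for the import to succeed:

import_user_module('/home/me/my_plugins')  # imports 'my_plugins' with /home/me on sys.path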
Example #26
model.compile('adam', 'sparse_categorical_crossentropy')
model.summary()

model.fit(
    x=[np.array(encode_input), np.array(decode_input)],
    y=np.array(decode_output),
    epochs=10,
    batch_size=128,
)

# Predict
decoded = decode(
    model,
    encode_test_input,
    start_token=token_dict['<START>'],
    end_token=token_dict['<END>'],
    pad_token=token_dict['<PAD>'],
)

predicted_y = []

for d_x in decoded:
    # 'os' is just the decoded sentence string here, not the os module
    os = ' '.join(map(lambda x: token_dict_inv[x], d_x[1:-1]))
    os = os.split(' ')
    predicted_y.append([os])

bleu_score = nltk.translate.bleu_score.corpus_bleu(predicted_y,
                                                   test_y_tokens,
                                                   weights=(0.5, 0.5))
print(bleu_score)
Example #27
def _parse_host_os(self, os):
    split = os.split('-')
    return '{} {}'.format(split[0].strip(), split[1].strip())
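What the formatting above produces for a hypothetical host-OS banner:

s = 'Ubuntu - 20.04'
split = s.split('-')
print('{} {}'.format(split[0].strip(), split[1].strip()))  # 'Ubuntu 20.04'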
Example #28
                try:
                    jobPipeline, pipelineThread = runningPipelinesPerHost[host]

                    if pipelineThread.isAlive():
                        logging.info('job %s on host %s is running' %
                                     (jobPipeline.job['JobId'], host))
                    else:
                        logging.info('job %s on host %s is done' %
                                     (jobPipeline.job['JobId'], host))
                        pipelineThread.join(10)
                        del runningPipelinesPerHost[host]

                        if jobPipeline.finishedSuccessfully:
                            try:
                                # split first so 'path' is defined before the isdir check
                                path, filename = os.path.split(
                                    jobPipeline.job['filename'])
                                if os.path.isdir(path):
                                    os.rename(
                                        jobPipeline.job['filename'],
                                        os.path.join(path, 'done', filename))
                            except Exception as e:
                                logger.error(str(e))
                        else:
                            #retry, put back in jobqueue
                            logging.info(
                                'job %s on host %s was put back in the queue' %
                                (jobPipeline.job['JobId'], host))
                            hostJobQueues[host].append(jobPipeline.job)
                except Exception as e:
                    logger.error(str(e))

            time.sleep(30)
Example #29
    dates_versions_response = requests.get(
        main_url + 'aggregates_by/build_id/channels/' + args.ch + '/dates/').text
    dates_versions_response = literal_eval(dates_versions_response)
    dates_for_versions = defaultdict(list)
    for item in dates_versions_response:
        dates_for_versions[item['version']].append(item['date'])
    last_n_versions = sorted(dates_for_versions.keys())[-args.n_versions:]
    versions_with_dates = {v: dates_for_versions[v] for v in last_n_versions}

    for v in versions_with_dates.keys():
        try:
            os.makedirs(os.path.join(args.dir, 'nightly_' + v))
        except OSError:
            pass
        options = requests.get(
            main_url + 'filters/?channel=' + args.ch + '&version=' + v).text
        options = literal_eval(options)
        options['os'] = np.unique([os.split(',')[0] for os in options['os']])
        for m in options['metric']:
            whole_data_for_metric = []
            ops = list(product(
                [dates_for_versions[v]], [m], [args.ch], [v], options['os'],
                options['application'], options['architecture']
            ))
            for data in pool.imap_unordered(_get_data, ops):
                whole_data_for_metric.append(data)
            # the data is saved as a list of elements from `_get_data`, see docstring
            with open(os.path.join(args.dir,
                                   'nightly_' + v + '/' + m + '.json'), 'w') as file:
                json.dump(whole_data_for_metric, file)
Example #30
     if re.search('tns', prod) and services.port in (1521, 1526):
         prod = "tns"
     if re.search('apache', prod) and services.port in (7777, 7778):
         prod = "apache"
     if re.search('terminal', prod) and services.port == 3389:
         prod = "rdp"
     if len(prod.split()) > 1:
         prod = prodreplace(prod).strip()
     if len(ver.split('.')) > 2:
         i = iter(ver.split('.'))
         ver = map('.'.join, zip(i, i))[0]
     if len(os.split()) > 1:
         i = iter(os.split())
         os = map(''.join, zip(i, i))[0]
         os = osreplace(os).strip()
 if 'product' in serv and 'version' in serv and 'ostype' in serv and not 'extrainfo' in serv:
     sp, pe = test_patterns(serv, ['product:'])
     sv, ve = test_patterns(serv, ['version:'])
     so, oe = test_patterns(serv, ['ostype:'])
     prod = serv[pe:(sv - 1)].strip().lower()
     ver = serv[ve:(so - 1)].strip().lower()
     os = serv[oe:].strip().lower()
     if services.port == 443:
         prod = "https"
         os = "windows"
     if re.search('microsoft-ds', prod) and services.port == 445:
         prod = "smb"
Example #31
# -*- coding: utf-8 -*-
#title           :init.py
#description     :This python script creates the Config file
#author          :Stefan Baumgartner
#date            :16.06.2018
#version         :1.00
#usage           :python init.py
#notes           :Please change only if you know what you are doing!!
#python_version  :2.7.6
#=======================================================================
# Import the modules needed to run the script.
import sys, os, subprocess, platform
# Reads the distribution on your system
# NOTE: this rebinds 'os' to a string, shadowing the os module imported above
os = str(platform.dist())
config = ""
oslist = os.split(',')
branchversion1 = os.replace('\'', "")
branchversion2 = branchversion1.replace('(', "")
branchversion3 = branchversion2.replace(')', "")
branchversion4 = branchversion3.replace(',', "")


def f_write_config():
    with open("/usr/local/share/mocis/config/config.cfg", "w") as OS:
        OS.write('OS = %s\n' % branchversion4[:-1])
        ostxt = "Your currently installed Linux Distribution is: "
        print ostxt + branchversion4[:-1]
        print 'File created'
        return

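For reference, str(platform.dist()) stringifies the whole tuple, which is what the replace chain above unpicks (the values are hypothetical). Note that the final [:-1] in f_write_config drops one more character than the cleaned string needs, which looks like a leftover from before the ')' replace was added:

s = str(('Ubuntu', '14.04', 'trusty'))  # "('Ubuntu', '14.04', 'trusty')"
s = s.replace('\'', '').replace('(', '').replace(')', '').replace(',', '')
print(s)       # 'Ubuntu 14.04 trusty'
print(s[:-1])  # 'Ubuntu 14.04 trust' -- one character short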
Example #32
            # check which jobs are done
            busyHosts = set(runningPipelinesPerHost.keys())

            for host in busyHosts:
                try:
                    jobPipeline, pipelineThread = runningPipelinesPerHost[host]

                    if pipelineThread.isAlive():
                        logging.info('job %s on host %s is running' % (jobPipeline.job['JobId'], host))
                    else:
                        logging.info('job %s on host %s is done' % (jobPipeline.job['JobId'], host))
                        pipelineThread.join(10)
                        del runningPipelinesPerHost[host]

                        if jobPipeline.finishedSuccessfully:
                            try:
                                # split first so 'path' is defined before the isdir check
                                path, filename = os.path.split(jobPipeline.job['filename'])
                                if os.path.isdir(path):
                                    os.rename(jobPipeline.job['filename'], os.path.join(path, 'done', filename))
                            except Exception as e:
                                logger.error(str(e))
                        else:
                            #retry, put back in jobqueue
                            logging.info('job %s on host %s was put back in the queue' % (jobPipeline.job['JobId'], host))
                            hostJobQueues[host].append(jobPipeline.job)
                except Exception as e:
                    logger.error(str(e))

            time.sleep(30)
Example #33
def olger_parser():
    f = open(sys.argv[1])
    xml_content = f.read()
    print(xml_content)
    f.close()
    dat = (json.loads(xml_content))

    timestamp = datetime.fromtimestamp(float(dat["scan"]["time"]))
    print(timestamp)
    dateelastic = str(str(timestamp).split(" ")[0]) + "T" + str(
        str(timestamp).split(" ")[1]) + "Z"
    address = ""
    plist = ""
    cvelist = {}
    graph = {}
    nodes = []
    links = []
    id = 0
    for x in dat["systems"]:
        if "up" in x["status"]:

            nodes.append({
                "id": str(id) + "" + str(x["ip"]),
                "host": str(x["ip"])
            })

            for e in x["services"]:
                if "open" in e["state"]:
                    #print e

                    try:
                        product = e["banner"].split("product: ")[1]
                        try:
                            product = product.split("version: ")[0]
                        except:
                            pass

                        try:
                            product = product.split("ostype: ")[0]
                        except:
                            pass

                        try:
                            product = product.split("hostname: ")[0]
                        except:
                            pass

                        product = product.strip()
                    except:
                        product = ""

                    try:
                        version = e["banner"].split("product: ")[1].split(
                            "version: ")[1]
                        try:
                            version = version.split("ostype: ")[0]
                        except:
                            pass
                        try:
                            version = version.split("hostname: ")[0]
                        except:
                            pass
                        try:
                            version = version.split("extrainfo: ")[0]
                        except:
                            pass

                        version = version.strip()
                    except:
                        version = ""
                    os2 = ""
                    try:
                        os2 = e["banner"].split("ostype: ")[1]
                        try:
                            os2 = os.split("devicetype: ")[0]
                        except:
                            pass
                            os2 = ""
                    except:
                        pass
                        os2 = ""

                    try:
                        cpe = e["cpe"]
                    except:
                        cpe = ""
                    cves = ""
                    cvecount = "0"
                    print(cpe)
                    #try:
                    cvelist = ""
                    csv_content = ""
                    cvecount = 0
                    f = open("data/cvedata.csv", "w")
                    f.write(str(""))
                    f.close()
                    if product != "" and version != "":
                        print("CHECKING CVES ...")

                        try:
                            os.system(
                                "python3 scripts/cvdetails-lookup.py --csv data/cvedata.csv --product \""
                                + product + "\" --version \"" + version + "\"")
                            f = open("data/cvedata.csv")
                            csv_content = f.read()
                            f.close()
                            cvecount = len(str(csv_content).split("\n"))
                            cvelist = ""
                            for x2 in str(csv_content).split("\n"):

                                cvelist = x2.split(";")[0] + "," + cvelist
                            print(cvelist)
                        except:
                            pass

                    nodes = {v['id']: v for v in nodes}.values()
                    #uncomment this line to send data to elastic
                    #toelastic={"customer":sys.argv[2],"timestamp":dateelastic,"address":x["ip"],"domains":"","lat":"","long":"","os":os,"port":str(e["port"]),"portname":str(e["name"]),"product":str(product),"version":str(version),"cpe":str(cpe),"cves":str(cvelist[product]),"cvecount":int(cvecount)}
                    if str(e["port"]) != "":
                        links.append({
                            "source": str(id) + "" + str(x["ip"]),
                            "target": str(id) + "" + str(e["port"]),
                            "value": "port"
                        })

                        nodes.append({
                            "id": str(id) + "" + str(e["port"]),
                            "host": str(id) + "" + str(e["port"])
                        })

                    if str(e["name"]) != "":

                        nodes.append({
                            "id": str(id) + "" + str(e["name"]),
                            "host": str(e["name"])
                        })

                        links.append({
                            "source": str(id) + "" + str(x["ip"]),
                            "target": str(id) + "" + str(e["name"]),
                            "value": "portname"
                        })

                    print "product : " + filter(str(product))
                    if str(product) != "":
                        print filter(str(product))

                        print str(x["ip"])

                        nb = filter(str(product))
                        try:
                            inventory_by_product[nb][id] = str(x["ip"])
                        except:
                            inventory_by_product[nb] = {}
                            inventory_by_product[nb][id] = str(x["ip"])
                            pass

                        nodes.append({
                            "id": str(id) + "" + str(product),
                            "host": str(product)
                        })
                        links.append({
                            "source": str(id) + "" + str(x["ip"]),
                            "target": str(id) + "" + str(product),
                            "value": "product"
                        })
                    if str(version) != "":
                        nodes.append({
                            "id": str(id) + "" + str(version),
                            "host": str(version)
                        })
                        links.append({
                            "source": str(id) + "" + str(version),
                            "target": str(id) + "" + str(version),
                            "value": "version"
                        })
                    if str(os2) != "":
                        nodes.append({
                            "id": str(id) + "" + str(os2),
                            "host": str(os2)
                        })
                        links.append({
                            "source": str(id) + "" + str(x["ip"]),
                            "target": str(os2),
                            "value": "OS"
                        })

                    if str(cvelist) != "":
                        try:
                            nodes.append({
                                "id": str(id) + "" + str(cvelist),
                                "host": str(cvelist)
                            })
                            links.append({
                                "source": str(id) + "" + str(x["ip"]),
                                "target": str(id) + "" + str(cvelist),
                                "value": "cves"
                            })
                        except:
                            pass

                    #send data to elastic
                    #print(elkpush("box_"+sys.argv[2],toelastic))
            id = id + 1
            graph = {"nodes": nodes, "links": links}

    data_grouped = ""
    i = 0
    for a in inventory_by_product:
        print "\n" + a + "\n"
        data_grouped = data_grouped + "\n\n[" + a + "]\n\n"
        for b in inventory_by_product[a]:
            print b
            data_grouped = data_grouped + "\n" + inventory_by_product[a][
                b] + "\n"
            i = i + 1

    f = open("data/inventory", "w")
    f.write(str(data_grouped))
    f.close()
    with open("web/graphs/data.json", 'w') as outfile:
        json.dump(graph, outfile)
Example #34
    def _load_extension(self):
        if hasattr(self, "__extension"):
            return

        self.__guess()
        return self.__guess_.extension if hasattr(self.__guess_, "mime") else "Unknown extension ." + os.path.split(self.file)[1]
Example #35
import subprocess, os

cwd = os.getcwd()

#subprocess.call(['makeotf', '-f', 'Galada.ufo', '-o', 'Galada.otf', '-ff', 'features', '-gf', 'glyphOrder', '-mf', 'menuname', '-r'], cwd=cwd)

print os.path.split(cwd)
Example #36
def p_r(X_train, label_train, X_test, label_test, dist_func='cos', pic=True, prefix='', suffix='', compute_ma=True, title='', save_results=True, save_path='', show_error=False):
    """
    X_train: 2d-ndarray, matrix of the searched sample set; each row is a sample vector
    X_test: 2d-ndarray, matrix of queries; each row is a query vector
    label_train: 1-d ndarray, labels of the searched sample set, not one-hot
    label_test: 1-d ndarray, labels of the query set, not one-hot
    Computes precision and recall; labels must be plain vectors, not one-hot
    param prefix, suffix: str, prefix/suffix identifiers, mainly so that files and images saved by batch runs do not overwrite each other
    param save_results: bool, whether to save intermediate matrices; False is recommended for batch runs to avoid creating too many files
    compute_ma: bool, whether to compute mean average precision
    """
    if (prefix!='' or suffix!=''): # when one external program calls p_r several times, this makes it easy to tell which task a result belongs to
        print 'now computing mission: ' + prefix + suffix + '=================================='

    #(1). Preprocessing
    if save_path != '':
        #if platform.system() == 'Linux' and save_path[-1] != '/':
        #    save_path += '/'
        #if platform.system() == 'Windows' and save_path[-1] != '\\':
        #    save_path += '\\'
        if os.path.split(save_path)[-1] != '':
            save_path = os.path.join(save_path, '')
        check_path(save_path) # check the path; if the directory does not exist, create it recursively
    assert isinstance(label_train, numpy.ndarray), label_train.ndim == 1
    assert isinstance(label_test, numpy.ndarray), label_test.ndim == 1
    assert dist_func in ['cos', 'euc', 'adj_cos']
    #X_train = X_train[:800, :] # data subset for testing the program ==================
    #X_test = X_test[:800, :] # data subset for testing the program =================
    #label_test = label_test[:800] #==================
    #label_train = label_train[:800] #======================
    t0 = time.clock()
    rows = X_test.shape[0]
    cols = X_train.shape[0]
    # the distance function is kept abstract below, so different metrics are easy to swap in
    if dist_func == 'cos':
        distance_matrix = cosine_dist(X_test=X_test, X_train=X_train)
    if dist_func == 'euc':
        distance_matrix = - euclidean_dist(X_test=X_test, X_train=X_train) # with Euclidean distance, smaller means more similar, hence the minus sign
    if dist_func == 'adj_cos':
        distance_matrix = adjusted_cosine(X_test=X_test, X_train=X_train)
    assert distance_matrix.shape == (rows, cols)
    t1 = time.clock()
    #print 'preprocessing time: ', t1 - t0
    #sys.stdout.flush()
    
    #(2). Main computation
    t2 = time.clock()
    # numpy.sort() has no reverse parameter, so sort ascending and then reverse the column order;
    # this yields an index matrix ordering distances from largest to smallest
    argsort_matrix = numpy.argsort(distance_matrix, axis=1)[:, ::-1]
    # matrix holding, for each row, the labels of the samples in distance-sorted order
    label_matrix = numpy.ones(shape=(rows, cols)) * label_train
    sorted_label_matrix = numpy.zeros(shape=label_matrix.shape)
    # reorder each row of the label matrix, using the corresponding row of argsort_matrix as indices
    for i in xrange(label_matrix.shape[0]):
        sorted_label_matrix[i] = label_matrix[i][argsort_matrix[i]]
    # replicate the test label vector label_test as a column, X_train.shape[0] times, to form a matrix
    test_label_matrix = numpy.ones(shape=(rows, cols)) * label_test[:, numpy.newaxis]
    # compare labels for equality to get a boolean matrix
    bool_matrix = (test_label_matrix == sorted_label_matrix)
    accum_correct_matrix = numpy.zeros(shape=(rows, cols), dtype='float64')
    #TODO: the loop below may be slow since each slice is a matrix column rather than a row; consider transposing first and assigning row slices
    accum_correct_matrix[:, 0] = bool_matrix[:, 0]
    for i in xrange(1, bool_matrix.shape[1]):
        accum_correct_matrix[:, i] = accum_correct_matrix[:, i-1] + bool_matrix[:, i]
    t3 = time.clock()
    #print 'main computation time: ', t3 - t2
    #sys.stdout.flush()
       
    #(3). Build the precision and recall matrices
    t4 = time.clock()
    precision_denominator = numpy.ones(shape=(rows, cols), dtype='float64') * numpy.arange(1, cols+1)
    precision_matrix = accum_correct_matrix / precision_denominator # each element is (correct so far / retrieved so far)
    recall_denominator = accum_correct_matrix[:, -1] # vector; component i is the number of training samples in the same class as test sample i
    recall_matrix = accum_correct_matrix / recall_denominator[:, numpy.newaxis]

    #(4). Average recall and precision of all test queries over the training set
    mean_precision = numpy.mean(precision_matrix, axis=0)
    mean_recall = numpy.mean(recall_matrix, axis=0)
    t5 = time.clock()
    #print 'time to compute the precision and recall matrices: ', t5 - t4
    #sys.stdout.flush()
    
    t6 = time.clock()
    #(5). Compute mean average precision
    if compute_ma is True:
        mean_avg_precision = mean_average_precision(bool_matrix=bool_matrix, accum_correct_matrix=accum_correct_matrix,
                                                prefix=prefix, suffix=suffix,save_path=save_path + prefix + 'mean_average_precision' + suffix)
        assert isinstance(mean_avg_precision, numpy.ndarray), mean_avg_precision.ndim == 1
        print 'mean average precision over all queries: ', mean_avg_precision.mean()
        #print 'time to compute mean average precision: ', time.clock() - t6
        sys.stdout.flush()

    #(6). Error analysis output
    # assumes the stemmed term-frequency documents have been generated and saved somewhere;
    # this step only reports each document's rank, distance and label under every query
    if show_error is True:
        assert bool_matrix.dtype == 'bool' # sanity check
        f = open('query_error.txt', 'w')
        f.write('query index        document rank        original document index        similarity to query        document label        result correct? \n')
        f.write('of the fields above, the document rank simply increases with the row number, so it carries little information\n')
        for i in xrange(rows):
            query_idx = i + 1 #1. query index
            for j in xrange(cols):
                order_idx = j + 1 #2. rank by distance: a smaller rank means closer to the query, i.e. higher similarity
                doc_idx = argsort_matrix[i][j] + 1 #3. original document index
                qd_dist = distance_matrix[i][doc_idx - 1] #4. distance between the document and the query
                doc_label = sorted_label_matrix[i][j] #5. document label
                tf_mark = bool_matrix[i][j] #6. whether the document is in the same class as the query
                f.write('        '.join((str(i) for i in [query_idx, order_idx, doc_idx, qd_dist, doc_label, tf_mark]))) # join rather than repeated '+' for string concatenation
                f.write('\n')
        f.close()
        print 'query error output finished...'
        sys.stdout.flush()
    
    #(7). Save the results
    if save_results:
        numpy.save(save_path + prefix + 'precision_matrix' + suffix + '.npy', precision_matrix)
        numpy.save(save_path + prefix + 'recall_matrix' + suffix + '.npy', recall_matrix)
        numpy.save(save_path + prefix + 'mean_precision' + suffix + '.npy', mean_precision)
        numpy.save(save_path + prefix + 'mean_recall' + suffix + '.npy', mean_recall)
    #print 'model saved...'
    #sys.stdout.flush()
    
    f = open(save_path +prefix+'mean_precision_record'+suffix, 'w')
    for num, i in enumerate(mean_precision):
        f.write(str(num) + ': ' + str(i) + '\n')
    f.close()
    
    f = open(save_path + prefix+'mean_recall_record'+suffix, 'w')
    for num, i in enumerate(mean_recall):
        f.write(str(num) + ': ' + str(i) + '\n')
    f.close()

    #(8). Draw the p-r curve from the results
    if pic is True:
        #print 'drawing curve...'
        pr_curve(title=title, mean_precision=mean_precision, mean_recall=mean_recall, prefix=prefix, suffix=suffix, save_path=save_path) # plot the recall-precision curve
Example #37
def getOsImage():
    # Return OS specifications
    # 'os' here holds the file's text and shadows any imported os module
    os = open('/etc/os-release').read()
    imageName = os.split('\n')
    return imageName[0].replace('PRETTY_NAME=', '').replace('"', '')
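The function above returns the first line of /etc/os-release with the PRETTY_NAME= prefix and quotes stripped; note it assumes PRETTY_NAME is the first line, which not every distribution guarantees. Traced on hypothetical file contents:

content = 'PRETTY_NAME="Debian GNU/Linux 12 (bookworm)"\nNAME="Debian GNU/Linux"'
first = content.split('\n')[0]
print(first.replace('PRETTY_NAME=', '').replace('"', ''))  # Debian GNU/Linux 12 (bookworm)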
Example #38
	if tmp:
		file = 0
	else:
		tmp = re.search(r'Linux-Servers-(ITSM).csv',onlyfile,re.IGNORECASE)
		if tmp:
			file = 1

	if file == 0:
		for line in open(onlyfile,'r').readlines():
			if count > 1:
				line = line.strip()
				line = re.sub(r'\"',r'',line)
				parts = line.split(',')
				name = parts[0].upper()
				os = parts[2]
				os = os.split(':')
				os = os[1]
				ip = parts[3]
				state = parts[1]
				tools = parts[4]
				status = parts[5]
				nic = parts[9]
				appliance = parts[8]
				if not vCenter.has_key(name):
					vCenter[name] = os
					vCenterIP[name] = ip
					vCenterState[name] = state
					vCenterToolsV[name] = tools
					vCenterVMStatus[name] = status
					vCenterNIC[name] = nic
					vCenterAppliance[name] = appliance
Example #39
import os
import shutil
import subprocess
import tempfile

from seqtools.general import load_csv

GENE_FILE = os.path.join(
    os.path.split(__file__)[0],
    '../data/gene.txt',
)

WORK_DIR = tempfile.mkdtemp()

QSUB_HEADERS = '#!/bin/bash\n' \
               '#$ -N "trinity"\n' \
               '#$ -e errors.log\n' \
               '#$ -cwd\n\n' \
               'module load trinity/2.1.1\n' \
               'module load samtools/1.3\n\n'

TRINITY_CMD = 'Trinity ' \
              '--genome_guided_bam {0} ' \
              '--genome_guided_max_intron 100000 ' \
              '--CPU 8 --max_memory 20G ' \
              '--output {1}\n'


def run_trinity_gene_qsub(gene_name, bam_file, output_dir,
                          pad=1000, # expand gene coordinates by 1kb
                          gene_file=GENE_FILE, work_dir=WORK_DIR,
Example #40
#! /usr/bin/env dreamthon
import os
import time


trash = (*files, trash_dir:'~/.Trash') ->
    files.each (f) ->
        to = os.join trash_dir, os.split(f)[-1] + '.' + time.now.iso
        print "{f} -> {to}"
        os.mv f, to 

(= trash (-> (*files trash_dir:'~/.Trash')
    (files .each (-> (f)
        (= to (os .join trash_dir (+ ((os .split f) -1) '.' (time .now .iso))))
        (print "{f} -> {to}")
        (os .mv f to)))))

main = -> trash *os.argv[1:]