Пример #1
0
def info_component(p_comp_dict, p_kount):
    """Pretty-print the metadata of one installed component.

    p_comp_dict -- dict describing the component (keys used here include
                   'project', 'home_url', 'component', 'version', 'port',
                   'datadir', 'logdir', 'autostart', 'svcname', 'svcuser',
                   'release_date', 'stage', 'is_current', 'is_installed',
                   and optionally 'built_on', 'status', 'up_time',
                   'data_size', 'connections', 'sup_plat', 'relnotes').
    p_kount     -- 1-based position in the listing; a separator rule is
                   printed before every component after the first.
    """
    if p_kount > 1:
        print(style_start + ("-" * 90) + style_end)

    if len(p_comp_dict['home_url']) > 0:
        print(style_start + "     Project: " + style_end + p_comp_dict['project'] + " (" + p_comp_dict['home_url'] + ")")
    else:
        print(style_start + "     Project: " + style_end + p_comp_dict['project'])

    if "built_on" in p_comp_dict:
        print(style_start + "   Component: " + style_end +
              p_comp_dict['component'] + " " +
              p_comp_dict['version'] + " built on " +
              p_comp_dict['built_on'])
    else:
        print(style_start + "   Component: " + style_end + p_comp_dict['component'] + " " + p_comp_dict['version'])

    if p_comp_dict['port'] > 1:
        print(style_start + "        port: " + style_end + str(p_comp_dict['port']))

    if p_comp_dict['datadir'] > "":
        print(style_start + "     datadir: " + style_end + p_comp_dict['datadir'])
    if p_comp_dict['logdir'] > "":
        print(style_start + "      logdir: " + style_end + p_comp_dict['logdir'])
    if p_comp_dict['autostart'] == "on":
        print(style_start + "   autostart: " + style_end + p_comp_dict['autostart'])
    if p_comp_dict['svcname'] > "" and util.get_platform() == "Windows":
        print(style_start + "     svcname: " + style_end + p_comp_dict['svcname'])
    if p_comp_dict['svcuser'] > "" and util.get_platform() == "Linux":
        print(style_start + "     svcuser: " + style_end + p_comp_dict['svcuser'])
    # FIX(review): the original source was garbled here -- a masked
    # "******" span fused the svcuser print with the status print and left
    # a dangling else (a syntax error).  Reconstructed from the else branch:
    # show status and uptime on one line when both are present, otherwise
    # print whichever of the two is available.
    if 'status' in p_comp_dict and 'up_time' in p_comp_dict:
        print(style_start + "      status: " + style_end + p_comp_dict['status'] +
              style_start + " for " + style_end + p_comp_dict['up_time'])
    else:
        if 'status' in p_comp_dict:
            print(style_start + "      status: " + style_end + p_comp_dict['status'])
        if 'up_time' in p_comp_dict:
            print(style_start + "    up since: " + style_end + p_comp_dict['up_time'])
    if 'data_size' in p_comp_dict:
        print(style_start + "   data size: " + style_end + p_comp_dict['data_size'])
    if 'connections' in p_comp_dict:
        print(style_start + " connections: " + style_end + p_comp_dict['connections'])
    print(style_start + "Release Date: " + style_end + p_comp_dict['release_date'] +
          style_start + "  Stage: " + style_end + p_comp_dict['stage'])
    if len(p_comp_dict['sup_plat']) > 0:
        print(style_start + "Supported On: " + style_end + p_comp_dict['sup_plat'])
    print(style_start + "   IsCurrent: " + style_end + str(p_comp_dict['is_current']) +
          style_start + "  IsInstalled: " + style_end + str(p_comp_dict['is_installed']))
    if p_comp_dict['relnotes']:
        print(style_start + " Release Notes : " + style_end)
        print(p_comp_dict['relnotes'])
Пример #2
0
def run(dataset,model_config):
    """Run mRMR feature selection for every requested feature count.

    For each size in dataset.mrmr_l0_list (capped at 500) this invokes the
    external mRMR binary on a CSV copy of the training data, then parses
    the raw output into a model file.  Results are cached on disk: existing
    raw/model files are not regenerated on reruns.
    """
    # pick the platform-specific external binaries (paths relative to cwd)
    if util.get_platform() == 'Windows':
        mrmr_exe = '../extern/mRMR/mrmr_win32.exe'
        converter_exe = '../install/bin/Converter.exe'
    else:
        mrmr_exe = '../extern/mRMR/mrmr_redhat_32'
        converter_exe = '../install/bin/Converter'

    dst_folder = dataset.name + '/mRMR'
    if os.path.exists(dst_folder) == False:
        os.makedirs(dst_folder)

    data_dim = dataset.dim
    data_num = dataset.data_num

    #bs_list = l1_def.get_lambda_list(ds,'mRMR')

    # mRMR is only run for feature counts up to 500
    sel_feat_num_list = [x for x in dataset.mrmr_l0_list if x <= 500]

    train_file = dataset.train_file
    csv_train_file =  train_file + '.csv'

    for sel_feat_num in sel_feat_num_list:
        raw_model_file = dst_folder + '/raw_model_%d' %sel_feat_num
        model_file = dst_folder + '/model_%d' %sel_feat_num

        #run mRMR
        mrmr_train_time  = 0
        #prepare training data (skip if the raw model is already cached)
        if os.path.exists(raw_model_file) == False:
            if os.path.exists(csv_train_file) == False:
                #convert data: mRMR consumes CSV, training data is libsvm
                print 'convert data'
                cmd = converter_exe + ' -i %s' %train_file + ' -o %s' %csv_train_file
                cmd += ' -st libsvm -dt csv'
                cmd = cmd.replace('/',os.sep)
                print cmd
                os.system(cmd)


            prev_cmd = mrmr_exe + ' -v %d' %data_dim + ' -t 0.5 -i %s' %csv_train_file
            cmd = prev_cmd + ' -n %d' %sel_feat_num + ' > %s' %raw_model_file
            cmd = cmd.replace('/',os.sep)
            print cmd
            start_time =time.time()
            os.system(cmd)
            end_time = time.time()

            #parse learning time (wall-clock seconds around the os.system call)
            mrmr_train_time = (float)(end_time - start_time)

        if os.path.exists(model_file) == False:
            #parse result into the final model file
            parse_model_file(raw_model_file,model_file, mrmr_train_time);

    # clean up the temporary CSV copy of the training data
    if os.path.exists(csv_train_file) == True:
        os.remove(csv_train_file)
Пример #3
0
def run(dataset, model_config):
    """Run mRMR feature selection for every requested feature count.

    For each size in dataset.mrmr_l0_list (capped at 500) this invokes the
    external mRMR binary on a CSV copy of the training data, then parses
    the raw output into a model file.  Existing raw/model files are reused
    rather than regenerated.
    """
    # pick the platform-specific external binaries (paths relative to cwd)
    if util.get_platform() == 'Windows':
        mrmr_exe = '../extern/mRMR/mrmr_win32.exe'
        converter_exe = '../install/bin/Converter.exe'
    else:
        mrmr_exe = '../extern/mRMR/mrmr_redhat_32'
        converter_exe = '../install/bin/Converter'

    dst_folder = dataset.name + '/mRMR'
    if os.path.exists(dst_folder) == False:
        os.makedirs(dst_folder)

    data_dim = dataset.dim
    data_num = dataset.data_num

    #bs_list = l1_def.get_lambda_list(ds,'mRMR')

    # mRMR is only run for feature counts up to 500
    sel_feat_num_list = [x for x in dataset.mrmr_l0_list if x <= 500]

    train_file = dataset.train_file
    csv_train_file = train_file + '.csv'

    for sel_feat_num in sel_feat_num_list:
        raw_model_file = dst_folder + '/raw_model_%d' % sel_feat_num
        model_file = dst_folder + '/model_%d' % sel_feat_num

        #run mRMR
        mrmr_train_time = 0
        #prepare training data (skip if the raw model is already cached)
        if os.path.exists(raw_model_file) == False:
            if os.path.exists(csv_train_file) == False:
                #convert data: mRMR consumes CSV, training data is libsvm
                print 'convert data'
                cmd = converter_exe + ' -i %s' % train_file + ' -o %s' % csv_train_file
                cmd += ' -st libsvm -dt csv'
                cmd = cmd.replace('/', os.sep)
                print cmd
                os.system(cmd)

            prev_cmd = mrmr_exe + ' -v %d' % data_dim + ' -t 0.5 -i %s' % csv_train_file
            cmd = prev_cmd + ' -n %d' % sel_feat_num + ' > %s' % raw_model_file
            cmd = cmd.replace('/', os.sep)
            print cmd
            start_time = time.time()
            os.system(cmd)
            end_time = time.time()

            #parse learning time (wall-clock seconds around the os.system call)
            mrmr_train_time = (float)(end_time - start_time)

        if os.path.exists(model_file) == False:
            #parse result into the final model file
            parse_model_file(raw_model_file, model_file, mrmr_train_time)

    # clean up the temporary CSV copy of the training data
    if os.path.exists(csv_train_file) == True:
        os.remove(csv_train_file)
Пример #4
0
def _python_info_dump():
    """Print interpreter properties, one per line, for a parent process.

    Runs inside a subprocess; the caller parses stdout line by line, so the
    order of the printed values is fixed.
    """
    import sys  # @Reimport
    from distutils import util, sysconfig  # @Reimport

    version = ".".join(str(part) for part in sys.version_info)
    base_prefix = getattr(sys, "real_prefix", sys.prefix)
    debug_flag = 1 if hasattr(sys, "gettotalrefcount") else 0
    subversion = ".".join(str(s) for s in getattr(sys, "subversion", "") if s)
    libdir = sysconfig.get_config_var("LIBDIR") or ""

    for value in (version, sys.prefix, base_prefix, debug_flag,
                  sys.hexversion, subversion, util.get_platform(), libdir):
        print(value)
Пример #5
0
def _python_info_dump():
    """Introspect interpreter details; intended to run in a subprocess.

    Prints one value per line in a fixed order the parent relies on:
    version, sys.prefix, real prefix, debug-build flag, hexversion,
    subversion info, platform string, and LIBDIR (or "").
    """
    import sys  # @Reimport
    from distutils import util, sysconfig  # @Reimport
    print(".".join(str(v) for v in sys.version_info))
    print(sys.prefix)
    # real_prefix exists when running inside a classic virtualenv
    print(getattr(sys, "real_prefix", sys.prefix))
    # gettotalrefcount is only present in debug builds of CPython
    print(1 if hasattr(sys, "gettotalrefcount") else 0)
    print(sys.hexversion)
    print(".".join(str(s) for s in getattr(sys, "subversion", "") if s))
    print(util.get_platform())
    print(sysconfig.get_config_var("LIBDIR") or "")
Пример #6
0
def useradd_linux(p_user):
    """Create a Linux user account with ``useradd -m`` via sudo.

    Returns 0 on success, 1 when not on Linux or not running as root.
    """
    print("Creating the user " + p_user)

    # guard clauses: this command is Linux-only and needs root privileges
    if util.get_platform() != "Linux":
        print("ERROR: USERADD is a Linux only command.")
        return 1
    if not util.is_admin():
        print("ERROR: Must be ROOT to run USERADD.")
        return 1

    ## make sure the user exists....
    util.run_sudo("useradd -m " + p_user)
    return 0
Пример #7
0
def run(dataset,model_config):
    """Run BIF feature selection for every feature count in dataset.l0_list.

    Converts the libsvm training file to ARFF (BIF's input format), invokes
    the external BIF binary once per requested feature count, and removes
    the temporary ARFF file afterwards.  Existing model files are reused.
    """
    # pick the platform-specific BIF binary (paths relative to cwd)
    if util.get_platform() == 'Windows':
        bif_exe = '../extern/FST3/x64/BIF.exe'
    else:
        bif_exe = '../extern/FST3/linux/BIF'

    converter_py = 'python ../tools/libsvm2arff.py'

    dst_folder = dataset.name + '/BIF'
    if os.path.exists(dst_folder) == False:
        os.makedirs(dst_folder)

    data_dim = dataset.dim
    data_num = dataset.data_num

    #bs_list = l1_def.get_lambda_list(ds,'bif')

    sel_feat_num_list = [x for x in dataset.l0_list]

    train_file = dataset.train_file
    arff_train_file =  train_file + '.arff'

    for sel_feat_num in sel_feat_num_list:
        model_file = dst_folder + '/model_%d' %sel_feat_num

        #run BIF
        # NOTE(review): train_time is initialized but never updated, and
        # bif_train_time below is computed but never used -- the timing
        # measured here is effectively discarded; confirm before relying
        # on BIF training times elsewhere.
        train_time  = 0
        #prepare training data (skip if the model is already cached)
        if os.path.exists(model_file) == False:
            if os.path.exists(arff_train_file) == False:
                #convert data: BIF consumes ARFF, training data is libsvm
                print 'convert data'
                cmd = converter_py + ' \"%s\" \"%s\"' %(train_file, arff_train_file)
                cmd = cmd.replace('/',os.sep)
                print cmd
                os.system(cmd)

            cmd = bif_exe + ' %s %d %s' %(arff_train_file, sel_feat_num, model_file)
            cmd = cmd.replace('/',os.sep)
            print cmd
            start_time =time.time()
            os.system(cmd)
            end_time = time.time()

            #parse learning time (wall-clock seconds around the os.system call)
            bif_train_time = (float)(end_time - start_time)

    # clean up the temporary ARFF copy of the training data
    if os.path.exists(arff_train_file) == True:
        os.remove(arff_train_file)
Пример #8
0
    def __analyze_dataset(self):
        """Analyze the training file and cache its statistics on self.

        Runs the external data_analysis tool once (results are cached in
        <train_file>_info.txt), then parses data number, dimension, and
        class number out of that file into self.data_num, self.dim, and
        self.class_num.  Exits the process if parsing fails.
        """
        info_file = self.train_file + '_info.txt'

        #if not analyzed before, analyze
        if os.path.exists(info_file) == False :
            if util.get_platform() == 'Windows':
                exe_name = r'..\install\bin\data_analysis.exe'
            else:
                exe_name = r'../install/bin/data_analysis'

            if os.path.exists(exe_name) == False:
                print 'analyze executable not exist!'
                sys.exit()
            print 'calculate dimension of %s' %self.train_file
            # append the tool's report to the info file for later parsing
            cmd = '{0} -i \"{1}\" -st libsvm >> {2}'.format(exe_name,self.train_file,info_file)
            print cmd
            os.system(cmd)

        #parse data num (expects exactly one "data number : N" line)
        pattern = re.compile(r'data number\s*:\s*(\d+)')
        result_list = pattern.findall(open(info_file,'r').read())
        if len(result_list) != 1:
            print result_list
            print 'parse data number failed'
            sys.exit()

        self.data_num = (int)(result_list[0])

        #parse dimension (expects exactly one "dimension : N" line)
        pattern = re.compile(r'dimension\s*:\s*(\d+)')
        result_list = pattern.findall(open(info_file,'r').read())
        if len(result_list) != 1:
            print result_list
            print 'parse dimension failed'
            sys.exit()

        self.dim = (int)(result_list[0])

        #parse class number (expects exactly one "class num : N" line)
        pattern = re.compile(r'class num\s*:\s*(\d+)')
        result_list = pattern.findall(open(info_file,'r').read())
        if len(result_list) != 1:
            print result_list
            print 'parse class num failed'
            sys.exit()

        self.class_num = (int)(result_list[0])
Пример #9
0
# Announce the start, launch the server (via init system or detached
# subprocess), and optionally create a database once it is up.
port = util.get_column('port', pgver)

isJson = os.getenv("isJson", None)
msg = pgver + " starting on port " + str(port)
if isJson:
    # machine-readable progress message (one-element JSON list)
    jsonMsg = {}
    jsonMsg['status'] = "wip"
    jsonMsg['component'] = pgver
    jsonMsg['msg'] = msg
    print(json.dumps([jsonMsg]))
else:
    print(msg)

cmd = sys.executable + " " + homedir + os.sep + "run-pgctl.py"

if util.get_platform() == "Linux" and autostart == "on":
    startup.start_linux("postgresql" + pgver[2:4])
else:
    startCmd = cmd + ' &'
    # BUG FIX: the original passed preexec_fn=os.setpgrp() -- that *called*
    # setpgrp in the parent (changing the parent's own process group) and
    # handed Popen its None return value.  Pass the function itself so it
    # runs in the child, detaching it into its own process group.
    subprocess.Popen(startCmd,
                     preexec_fn=os.setpgrp,
                     close_fds=True,
                     shell=True)

isYes = os.getenv("isYes", "False")
pgName = os.getenv("pgName", "")
if ((pgName > "") and (isYes == "True")):
    print("\n # waiting for DB to start...")
    time.sleep(4)
    cmd = os.path.join(pgver, 'bin', 'createdb')
    cmd = cmd + " -U postgres -w -e -p " + str(port) + " " + str(pgName)
Пример #10
0
def run(dataset, model_config, output_file):
    """Train and evaluate liblinear (solver -s 5) over dataset.c_list.

    For each C value: train a model, predict on the test file, and record
    train/test time, test error, non-zero feature count, and sparse rate.
    Per-C results are cached in result files so reruns are skipped.  The
    accumulated result is saved to output_file and returned.
    """
    # pick the external binaries (Linux paths are not configured yet)
    if util.get_platform() == 'Windows':
        exe_dir = '../extern/liblinear/windows/'
        train_exe = exe_dir + 'train.exe'
        test_exe = exe_dir + 'predict.exe'
    else:
        exe_dir = '../extern/liblinear/windows/'
        raise Exception('path to liblinear in linux is not set yet!')

    dst_folder = dataset.name + '/liblinear'
    if os.path.exists(dst_folder) == False:
        os.makedirs(dst_folder)

    result = util.ResultItem()

    c_list = dataset.c_list
    for c in c_list:
        model_file = dst_folder + '/model_%g.txt' % (c)
        predict_file = dst_folder + '/predict_%g.txt' % (c)
        result_file = dst_folder + '/result_%g.txt' % (c)
        test_file = dst_folder + '/test_%g.txt' % (c)

        result_once = util.ResultItem()
        # reuse cached per-C results when present
        if os.path.exists(result_file) == True:
            result_once.load_result(result_file)
        else:
            #clear the result file
            open(test_file, 'w').close()

            #training
            train_cmd = train_exe + ' -s 5 -c %f' % c + ' %s' % dataset.get_train_file(
                model_config['rand_num']) + ' %s' % model_file
            train_cmd = train_cmd.replace('/', os.sep)
            print train_cmd
            start_time = time.time()
            os.system(train_cmd)
            end_time = time.time()

            #hard to evaluate train_error, set to zero
            result_once.append_value('train_error', 0)

            #parse learning time (wall-clock seconds around the os.system call)
            train_time = (float)(end_time - start_time)
            result_once.append_value('train_time', train_time)

            #predict (predictor's console output is captured in test_file)
            test_cmd = test_exe + ' %s' % dataset.test_file + ' %s' % model_file + ' %s' % predict_file + '>> %s' % test_file
            test_cmd = test_cmd.replace('/', os.sep)
            print test_cmd
            start_time = time.time()
            os.system(test_cmd)
            end_time = time.time()
            test_time = (float)(end_time - start_time)

            result_once.append_value('test_time', test_time)

            test_error = parse_test_error_rate(test_file)
            result_once.append_value('test_error', test_error)

            model_size = get_model_size(model_file)

            result_once.append_value('non_zero_num', model_size)

            # percentage of weights that are zero
            sparse_rate = 100.0 - model_size * 100.0 / dataset.dim
            result_once.append_value('sparse_rate', sparse_rate)

            result_once.save_result(result_file)

        result.Append(result_once)

        print '\nTraining Result: '
        result_once.Display()
        print '\n'

    result.save_result(output_file)
    return result
Пример #11
0
def top(display=True, isJson=False):
  """Display a one-shot, top(1)-style system snapshot.

  Gathers per-process stats via psutil plus network, CPU, disk, load, and
  uptime figures.  With display=False only the process scan runs and the
  function returns early.  With isJson=True a single JSON document (a
  one-element list) is printed instead of the human-readable tables.
  """
  import psutil

  current_timestamp = int(time.mktime(datetime.utcnow().timetuple()))
  jsonDict = {}
  procs = []
  for p in psutil.process_iter():
    try:
      p = p.as_dict(attrs=
        ['pid', 'username', 'cpu_percent', 'memory_percent', 'cpu_times', 'name'])
    except (psutil.NoSuchProcess, IOError, OSError) as e:
      # process vanished mid-iteration (or was inaccessible): skip it
      pass
    else:
      procs.append(p)

  if not display:
    return

  # busiest processes first
  processes = sorted(procs, key=lambda p: p['cpu_percent'], reverse=True)

  network_usage = psutil.net_io_counters()
  jsonDict['kb_sent'] = network_usage.bytes_sent / 1024
  jsonDict['kb_recv'] = network_usage.bytes_recv / 1024

  cpu = psutil.cpu_times_percent(percpu=False)
  iowait = ""
  if util.get_platform() == "Linux":
    # the iowait field is only reported on Linux
    jsonDict['iowait'] = str(cpu.iowait)
    iowait = "," + str(cpu.iowait).rjust(5) + "%wa"

  jsonDict['current_timestamp'] = current_timestamp
  jsonDict['cpu_user'] = str(cpu.user)
  jsonDict['cpu_system'] = str(cpu.system)
  jsonDict['cpu_idle'] = str(cpu.idle)
  if not isJson:
    print("CPU(s):" + str(cpu.user).rjust(5) + "%us," + \
      str(cpu.system).rjust(5) + "%sy," + str(cpu.idle).rjust(5) + "%id" + iowait)

  disk = psutil.disk_io_counters(perdisk=False)
  read_kb = disk.read_bytes / 1024
  write_kb = disk.write_bytes / 1024
  jsonDict['kb_read']  = str(read_kb)
  jsonDict['kb_write']  = str(write_kb)
  if not isJson:
    print("DISK: kB_read " + str(read_kb) + ", kB_written " + str(write_kb))

  uptime = datetime.now() - datetime.fromtimestamp(psutil.boot_time())
  str_uptime = str(uptime).split('.')[0]
  line = ""
  if util.get_platform() == "Windows":
    uname_len = 13
  else:
    uname_len = 8
    # load averages are not available on Windows
    av1, av2, av3 = os.getloadavg()
    str_loadavg = "%.2f %.2f %.2f  " % (av1, av2, av3)
    line = style_start + "Load average: " + style_end + str_loadavg
    jsonDict['load_avg']  = str(str_loadavg)
  line = line + style_start + "Uptime:" + style_end + " " + str_uptime
  jsonDict['uptime']  = str(str_uptime)
  if not isJson:
    print(line)

  i = 0
  my_pid = os.getpid()
  if not isJson:
    print("")
    print(style_start + "  PID " + "USER".ljust(uname_len) + "   %CPU %MEM      TIME+ COMMAND" + style_end)

  jsonList = []
  for pp in processes:
    # never list this process itself; show at most the top 10 others
    if pp['pid'] == my_pid:
      continue
    i += 1
    if i > 10:
      break

    # TIME+ column shows process CPU cumulative time and it
    # is expressed as: "mm:ss.ms"

    ctime = timedelta(seconds=sum(pp['cpu_times']))
    ctime_mm = str(ctime.seconds // 60 % 60)
    ctime_ss = str(int(ctime.seconds % 60)).zfill(2)
    ctime_ms = str(ctime.microseconds)[:2].ljust(2, str(0))
    ctime = "{0}:{1}.{2}".format(ctime_mm, ctime_ss, ctime_ms)

    if util.get_platform() == "Windows":
      username = str(pp['username'])
      # shorten username by eliminating stuff before the backslash
      slash_pos = username.find('\\')
      if slash_pos > 0:
        username = username[(slash_pos + 1):]
      username = username[:uname_len]
    else:
      username = pp['username'][:uname_len]
    if isJson:
        pp['username'] = username
        pp['ctime'] = ctime
        pp['cpu_percent'] = float(pp['cpu_percent'])
        pp['memory_percent'] = float(round(pp['memory_percent'],1))
        jsonList.append(pp)
    else:

      print( str(pp['pid']).rjust(5) + " " + \
            username.ljust(uname_len) + " " + \
            str(pp['cpu_percent']).rjust(6) + " " + \
            str(round(pp['memory_percent'],1)).rjust(4) + " " + \
            str(ctime).rjust(10) + " " + \
            pp['name'] )
  if isJson:
      jsonDict['top'] = jsonList
      print ( json.dumps([jsonDict]) )
  else:
    print( "" )
Пример #12
0
def convert(source):
    """Parse *source* and process the resulting tree for the current platform."""
    reset()
    syntax_tree = program.parse(source)
    return process(syntax_tree, util.get_platform())
Пример #13
0
####################################################################
#########            Copyright 2016 BigSQL               ###########
####################################################################

import os
import sys
import util
import tempfile

# Launch pgAdmin III from the PGC_HOME installation, logging its output
# to a file in the system temp directory.  Only Windows and macOS
# (Darwin) layouts are handled; on other platforms nothing is started.
PGC_HOME = os.getenv('PGC_HOME', '')
dir_name = os.path.join(PGC_HOME, 'pgadmin3')

print "Starting pgAdmin III..."
logfile = tempfile.gettempdir() + os.sep + "pgadmin3.log"
print "  logging to: " + logfile

if util.get_platform() == "Windows":
    pgadmin3 = dir_name + os.sep + "bin" + os.sep + "pgAdmin3.exe"
    util.system(pgadmin3 + " > " + logfile + " 2>&1")

if util.get_platform() == "Darwin":
    # macOS app-bundle layout; "&" backgrounds the GUI process
    pgadmin3 = dir_name + os.sep + "pgAdmin3.app/Contents/MacOS/pgAdmin3"
    os.system(pgadmin3 + " > " + logfile + " 2>&1 &")

sys.exit(0)
Пример #14
0
def run(dataset, model, config, param_config, output_file):
    """Run SOL online feature selection for every requested feature count.

    Builds a SOL command line from the dataset, loss, and model settings,
    runs it once per feature count (appending all console output to
    output_file), then parses and displays the accumulated results.  A
    model of the form "MODEL+PREMODEL" (e.g. "+mRMR") feeds the
    pre-selection stage's model files to SOL and adds their training
    times to the returned result.
    """
    #ofs executable (platform-specific path)
    if util.get_platform() == 'Windows':
        ofs_exe = r'..\install\bin\SOL.exe'
    else:
        ofs_exe = '../install/bin/SOL'

    #get the dimension of the data
    data_dim = dataset.dim

    # split "model+pre_model" into the two stage names
    if '+' in model:
        [model,pre_model] = filter(None,model.split('+'))
    else:
        pre_model = ''

    if pre_model == 'mRMR':
        sel_feat_num_list = dataset.mrmr_l0_list
    else:
        sel_feat_num_list = dataset.l0_list

    #evaluate the result (all runs append to the same output file)
    cmd_postfix = ' >> %s' %output_file

    dt_cmd = dataset.get_train_cmd(config['rand_num'],config['cache'])
    # choose the loss/class-number flags per model and class count
    if dataset.class_num > 2:
        if model == 'SOFS':
            loss_cmd = ' -cn %d -loss MaxScoreSquaredHinge ' %(dataset.class_num)
        else:
            loss_cmd = ' -cn {0} -loss {1} '.format(dataset.class_num, config['mc_loss'])
    else:
        if model == 'SOFS':
            loss_cmd = ' -cn 2 -loss SquaredHinge '
        else:
            loss_cmd = ' -cn 2 -loss {0} '.format(config['bc_loss'])

    norm_cmd = ' -norm ' if config['norm'] == True else ''

    cmd_prefix  = ofs_exe + dt_cmd + loss_cmd + norm_cmd  + ' -m %s ' %model + param_config

    if 'passes' in config:
        cmd_prefix += ' -passes %d ' %config['passes']

    for sel_num in sel_feat_num_list:
        cmd = cmd_prefix + ' -k %d' %sel_num
        if len(pre_model) != 0:
            model_file = dataset.name + '/%s/model_%d' %(pre_model,sel_num)
            cmd += ' -im %s ' %(model_file)
            #predict file
            predict_file   = dataset.name + '/%s+%s/predict_%g.txt' %(model,pre_model, sel_num)
        else:
            predict_file   = dataset.name + '/%s/predict_%g.txt' %(model,sel_num)
        cmd += ' -op %s ' %predict_file
        cmd += cmd_postfix
        cmd = cmd.replace('/',os.sep)
        print cmd
        os.system(cmd)

    #parse the result
    result = util.ResultItem()
    print output_file
    result.parse_ofs_result(output_file)
    result.Display()

    # add the pre-selection stage's training time to each result entry
    if len(pre_model) != 0:
        for k in range(0,len(sel_feat_num_list)):
            model_file = dataset.name + '/%s/model_%d' %(pre_model,sel_feat_num_list[k])
            result.train_time[k] += util.parse_train_time(model_file)

    print '\nTraining Result: '
    result.Display()
    print '\n'

    return result
Пример #15
0
def run(dataset, model_config, output_file):
    """Train and evaluate FGM over every feature count in dataset.l0_list.

    For each feature budget: train with the external FGM binary, predict on
    the test file, and record train/test time, test error, non-zero count,
    and sparse rate.  Per-budget results are cached in result files so
    reruns are skipped.  The accumulated result is saved to output_file and
    returned.
    """
    # pick the external binaries (Linux paths are not configured yet)
    if util.get_platform() == "Windows":
        exe_dir = "../extern/FGM_V2/x64/"
        train_exe = exe_dir + "FGM.exe"
        test_exe = exe_dir + "Predict.exe"
    else:
        exe_dir = "../extern/fgm/windows/"
        raise Exception("path to liblinear in linux is not set yet!")

    # get the dimension of the data
    data_dim = dataset.dim

    sel_feat_num_list = dataset.l0_list

    dst_folder = dataset.name + "/FGM"
    if os.path.exists(dst_folder) == False:
        os.makedirs(dst_folder)

    result = util.ResultItem()

    for sel_feat_num in sel_feat_num_list:
        model_file = dst_folder + "/model_%d.txt" % (sel_feat_num)
        predict_file = dst_folder + "/predict_%d.txt" % (sel_feat_num)
        result_file = dst_folder + "/result_%d.txt" % (sel_feat_num)
        test_file = dst_folder + "/test_%d.txt" % (sel_feat_num)

        result_once = util.ResultItem()

        # reuse cached per-budget results when present
        if os.path.exists(result_file) == True:
            result_once.load_result(result_file)
        else:
            # evaluate the result
            train_cmd = (
                train_exe
                + " -s 12 -c 10 -B %d" % sel_feat_num
                + " %s" % dataset.get_train_file(model_config["rand_num"])
                + " %s" % model_file
            )
            train_cmd = train_cmd.replace("/", os.sep)

            print train_cmd
            start_time = time.time()
            os.system(train_cmd)
            end_time = time.time()

            # hard to evaluate train_error, set to zero
            result_once.append_value("train_error", 0)

            # parse learning time (wall-clock seconds around the os.system call)
            train_time = (float)(end_time - start_time)
            result_once.append_value("train_time", train_time)

            # predict (predictor's console output is captured in test_file)
            test_cmd = (
                test_exe + " %s" % dataset.test_file + " %s" % model_file + " %s" % predict_file + "> %s" % test_file
            )
            test_cmd = test_cmd.replace("/", os.sep)
            print test_cmd
            start_time = time.time()
            os.system(test_cmd)
            end_time = time.time()
            test_time = (float)(end_time - start_time)

            result_once.append_value("test_time", test_time)

            test_error = parse_test_error_rate(test_file)
            result_once.append_value("test_error", test_error)

            result_once.append_value("non_zero_num", sel_feat_num)

            # percentage of features left unused by the budget
            sparse_rate = 100.0 - sel_feat_num * 100.0 / dataset.dim
            result_once.append_value("sparse_rate", sparse_rate)

            result_once.save_result(result_file)

        result.Append(result_once)

        print "\nTraining Result: "
        result_once.Display()
        print "\n"

    result.save_result(output_file)
    return result
Пример #16
0
# FILE:     run_sol.py
# ROLE:     TODO (some explanation)
# CREATED:  2015-02-08 00:41:09
# MODIFIED: 2015-02-08 20:47:38
#This script is to run experiment automatically to test the performance of the algorithm

import sys
import os

import util

import run_mRMR
import run_bif

# sol executable: pick the platform-specific binary path
sol_exe = (r'..\install\bin\SOL.exe'
           if util.get_platform() == 'Windows'
           else '../install/bin/SOL')


def get_cmd_prefix(dt, model, config):
    dt_cmd = dt.get_train_cmd(config['rand_num'], config['cache'])
    if dt.class_num > 2:
        if model == 'CW_TG' or model == 'CW_RDA':
            loss_cmd = ' -cn %d -loss MaxScoreSquaredHinge ' % (dt.class_num)
        else:
            loss_cmd = ' -cn {0} -loss {1} '.format(dt.class_num,
                                                    config['mc_loss'])
    else:
        if model == 'CW_TG' or model == 'CW_RDA':
Пример #17
0
def main():
	"""Entry point of the asset-conversion tool.

	Parses CLI arguments, loads the configuration and tool definitions,
	conforms all paths, then either monitors the asset folders for changes
	or converts every asset in one pass, persisting the cache at the end.
	"""
	commands = {}
	config = None
	ignore_list = []
	settings = AttributeStore()
	tools = {}
	asset_folders = []

	p = argparse.ArgumentParser()
	p.add_argument(
		"-c",
		"--config",
		dest="config_path",
		metavar="CONFIG_FILE_PATH",
		help="Configuration file path to use when converting assets",
		required=True
	)

	p.add_argument(
		"-p",
		"--platform",
		dest="platform"
	)
	p.add_argument(
		"-y",
		"--clear-cache",
		dest="clear_cache",
		action="store_true"
	)
	p.add_argument(
		"-s",
		"--source_root",
		dest="source_root"
	)

	args = p.parse_args()
	config_cache = KeyValueCache()

	# load config
	config_data = load_config(args.config_path, config_cache)

	# the source_root can be specified on the command line;
	# this properly inserts it into the paths dict
	if "paths" in config_data:
		if "source_root" not in config_data["paths"]:
			if not args.source_root:
				raise Exception(
						"source_root is missing. This should be defined"
						" in a config file, or on the command line."
					)
			else:
				# this path SHOULD be an absolute path
				config_data["paths"]["source_root"] = args.source_root

	config = AttributeStore(config_data)

	# default to the host platform when none was given on the command line
	if not args.platform:
		args.platform = get_platform()
		logging.info("Target Platform is \"%s\"" % args.platform)


	# load tools
	tools_path = os.path.abspath(
		os.path.join(
		WorkingDirectory.current_directory(),
		os.path.dirname(__file__),
		"tools.conf"
		)
	)

	# get cache path
	cache = Cache(args.config_path, remove=args.clear_cache)
	cache.load()

	# conform all paths
	if getattr(config, "paths", None):
		base_path = os.path.dirname(os.path.abspath(args.config_path))

		# setup environment variables, path, etc.
		config.paths = setup_environment(base_path, config.paths, args.platform)

		setattr(settings, "paths", AttributeStore(config.paths))


	# parse all tools
	Tool.load_tools(
		tools,
		tools_path,
		config.tools
	)

	logging.info("Loaded %i tools." % len(tools.items()))

	# parse asset folders
	# NOTE(review): the dict.items() concatenation below works only on
	# Python 2; Python 3 items() views do not support "+".
	for asset_glob in config.assets:
		data = dict(
			{u"glob" : asset_glob}.items() +
			config.assets[asset_glob].items()
		)
		asset_folder = AssetFolderMask(**data)
		asset_folder.make_folders_absolute(
			settings.paths.source_root,
			settings.paths.destination_root
		)
		asset_folders.append(asset_folder)
	logging.info("Loaded %i asset folders." % len(asset_folders))

	# check if we need to enter monitoring mode
	monitor_mode = hasattr(config, "monitor")
	if monitor_mode:
		monitor = config.monitor
		if not "url" in monitor:
			raise Exception("Monitor block requires a \"url\" parameter")

		# run monitoring
		monitor_assets(
			cache,
			settings,
			asset_folders,
			tools,
			args.platform,
			monitor["url"]
		)
	else:
		# just run through all assets
		iterate_assets(
			cache,
			settings,
			asset_folders,
			tools,
			args.platform
		)

	# write cache to file
	cache.save()
Пример #18
0
def run(dataset, model_config, output_file):
    """Train and evaluate FGM over every feature count in dataset.l0_list.

    For each feature budget: train with the external FGM binary, predict on
    the test file, and record train/test time, test error, non-zero count,
    and sparse rate.  Per-budget results are cached in result files so
    reruns are skipped.  The accumulated result is saved to output_file and
    returned.
    """
    # pick the external binaries (Linux paths are not configured yet)
    if util.get_platform() == 'Windows':
        exe_dir = '../extern/FGM_V2/x64/'
        train_exe = exe_dir + 'FGM.exe'
        test_exe = exe_dir + 'Predict.exe'
    else:
        exe_dir = '../extern/fgm/windows/'
        raise Exception('path to liblinear in linux is not set yet!')

    #get the dimension of the data
    data_dim = dataset.dim

    sel_feat_num_list = dataset.l0_list

    dst_folder = dataset.name + '/FGM'
    if os.path.exists(dst_folder) == False:
        os.makedirs(dst_folder)

    result = util.ResultItem()

    for sel_feat_num in sel_feat_num_list:
        model_file = dst_folder + '/model_%d.txt' % (sel_feat_num)
        predict_file = dst_folder + '/predict_%d.txt' % (sel_feat_num)
        result_file = dst_folder + '/result_%d.txt' % (sel_feat_num)
        test_file = dst_folder + '/test_%d.txt' % (sel_feat_num)

        result_once = util.ResultItem()

        # reuse cached per-budget results when present
        if os.path.exists(result_file) == True:
            result_once.load_result(result_file)
        else:
            #evaluate the result
            train_cmd = train_exe + ' -s 12 -c 10 -B %d' % sel_feat_num + ' %s' % dataset.get_train_file(
                model_config['rand_num']) + ' %s' % model_file
            train_cmd = train_cmd.replace('/', os.sep)

            print train_cmd
            start_time = time.time()
            os.system(train_cmd)
            end_time = time.time()

            #hard to evaluate train_error, set to zero
            result_once.append_value('train_error', 0)

            #parse learning time (wall-clock seconds around the os.system call)
            train_time = (float)(end_time - start_time)
            result_once.append_value('train_time', train_time)

            #predict (predictor's console output is captured in test_file)
            test_cmd = test_exe + ' %s' % dataset.test_file + ' %s' % model_file + ' %s' % predict_file + '> %s' % test_file
            test_cmd = test_cmd.replace('/', os.sep)
            print test_cmd
            start_time = time.time()
            os.system(test_cmd)
            end_time = time.time()
            test_time = (float)(end_time - start_time)

            result_once.append_value('test_time', test_time)

            test_error = parse_test_error_rate(test_file)
            result_once.append_value('test_error', test_error)

            result_once.append_value('non_zero_num', sel_feat_num)

            # percentage of features left unused by the budget
            sparse_rate = 100.0 - sel_feat_num * 100.0 / dataset.dim
            result_once.append_value('sparse_rate', sparse_rate)

            result_once.save_result(result_file)

        result.Append(result_once)

        print '\nTraining Result: '
        result_once.Display()
        print '\n'

    result.save_result(output_file)
    return result
Пример #19
0
def run(dataset, model, config, param_config, output_file):
    """Run SOL online feature selection for each target feature count.

    dataset      -- dataset descriptor (paths, class count, feature-count lists)
    model        -- algorithm name, optionally "model+pre_model" when a
                    pre-selection stage (e.g. mRMR) feeds this one
    config       -- experiment settings (rand_num, cache, losses, norm, passes)
    param_config -- extra command-line parameters passed through to SOL
    output_file  -- file every SOL invocation appends its output to

    Returns the parsed util.ResultItem.
    """
    # platform-specific path to the SOL executable
    if util.get_platform() == 'Windows':
        ofs_exe = r'..\install\bin\SOL.exe'
    else:
        ofs_exe = '../install/bin/SOL'

    # data dimension (currently unused; kept for parity with the original)
    data_dim = dataset.dim

    # split an optional pre-selection model out of "model+pre_model"
    if '+' in model:
        model, pre_model = [part for part in model.split('+') if part]
    else:
        pre_model = ''

    # feature counts to sweep; mRMR pre-selection carries its own list
    sel_feat_num_list = dataset.mrmr_l0_list if pre_model == 'mRMR' else dataset.l0_list

    # every run appends to the same shared result file
    cmd_postfix = ' >> %s' % output_file

    dt_cmd = dataset.get_train_cmd(config['rand_num'], config['cache'])

    # loss selection: SOFS uses fixed hinge losses, others come from config
    if model == 'SOFS':
        if dataset.class_num > 2:
            loss_cmd = ' -cn %d -loss MaxScoreSquaredHinge ' % (
                dataset.class_num)
        else:
            loss_cmd = ' -cn 2 -loss SquaredHinge '
    else:
        if dataset.class_num > 2:
            loss_cmd = ' -cn {0} -loss {1} '.format(dataset.class_num,
                                                    config['mc_loss'])
        else:
            loss_cmd = ' -cn 2 -loss {0} '.format(config['bc_loss'])

    norm_cmd = ' -norm ' if config['norm'] == True else ''

    cmd_prefix = ofs_exe + dt_cmd + loss_cmd + norm_cmd + ' -m %s ' % model + param_config

    if 'passes' in config:
        cmd_prefix += ' -passes %d ' % config['passes']

    # one SOL invocation per target number of selected features
    for sel_num in sel_feat_num_list:
        cmd = cmd_prefix + ' -k %d' % sel_num
        if len(pre_model) != 0:
            # feed the pre-selection model in; outputs are named after both stages
            model_file = dataset.name + '/%s/model_%d' % (pre_model, sel_num)
            cmd += ' -im %s ' % (model_file)
            predict_file = dataset.name + '/%s+%s/predict_%g.txt' % (
                model, pre_model, sel_num)
        else:
            predict_file = dataset.name + '/%s/predict_%g.txt' % (model,
                                                                  sel_num)
        cmd += ' -op %s ' % predict_file
        cmd += cmd_postfix
        cmd = cmd.replace('/', os.sep)
        print(cmd)
        os.system(cmd)

    # collect all runs' results back out of the shared output file
    result = util.ResultItem()
    print(output_file)
    result.parse_ofs_result(output_file)
    result.Display()

    # charge the pre-selection stage's training time to each entry
    if len(pre_model) != 0:
        for idx, feat_num in enumerate(sel_feat_num_list):
            model_file = dataset.name + '/%s/model_%d' % (pre_model, feat_num)
            result.train_time[idx] += util.parse_train_time(model_file)

    print('\nTraining Result: ')
    result.Display()
    print('\n')

    return result
Пример #20
0
from __future__ import print_function, division

####################################################################
######          Copyright (c)  2015-2020 BigSQL           ##########
####################################################################

# Reload (re-read configuration of) a running PostgreSQL component.

import os, sys
import util, startup

# Component name; "9X" is presumably templated to the real version at
# build/packaging time -- TODO confirm.
pgver = "pg9X"

print(pgver + " reloading")

# When the component is registered for autostart on Linux, delegate the
# reload to the system service manager instead of calling pg_ctl directly.
autostart = util.get_column('autostart', pgver)
if autostart == "on" and util.get_platform() == "Linux":
  rc = startup.reload_linux("postgresql" + pgver[2:4])
  sys.exit(rc)

# Otherwise run "pg_ctl reload" against the configured data directory.
MY_HOME = os.getenv('MY_HOME', '')
homedir = os.path.join(MY_HOME, pgver)
datadir = util.get_column('datadir', pgver)

pg_ctl = os.path.join(homedir, "bin", "pg_ctl")
parms = ' reload -D "' + datadir + '"'
rc = util.system(pg_ctl + parms)

# Exit with pg_ctl's return code so callers can detect failure.
sys.exit(rc)
Пример #21
0
def info_component(p_comp_dict, p_kount):
    """Pretty-print one component's metadata dictionary.

    p_comp_dict -- dict describing a component (project, version, port,
                   datadir, status, release info, ...)
    p_kount     -- 1-based component counter; a separator rule is printed
                   before every component after the first
    """
    if p_kount > 1:
        print(style_start + ("-" * 90) + style_end)

    print(style_start + "     Project: " + style_end + p_comp_dict['project'] + " (" + p_comp_dict['project_url'] + ")" )

    print(style_start + "   Component: " + style_end + p_comp_dict['component'] + " " + p_comp_dict['version'] + " (" + p_comp_dict['proj_description'] + ")")

    if p_comp_dict['port'] > 1:
        print(style_start + "        port: " + style_end + str(p_comp_dict['port']))

    if p_comp_dict['datadir'] > "":
        print(style_start + "     datadir: " + style_end + p_comp_dict['datadir'])

    if p_comp_dict['logdir']  > "":
        print(style_start + "      logdir: " + style_end + p_comp_dict['logdir'])

    if p_comp_dict['autostart'] == "on":
        print(style_start + "   autostart: " + style_end + p_comp_dict['autostart'])

    if p_comp_dict['svcname'] > "" and util.get_platform() == "Windows":
        print(style_start + "     svcname: " + style_end + p_comp_dict['svcname'])

    # NOTE(review): the next two statements are reconstructed -- the original
    # line was corrupted ('"******"' fused the svcuser print and the status
    # condition into one invalid statement). Reconstruction follows the
    # parallel Windows/svcname branch above and the else branch below;
    # verify against version control.
    if p_comp_dict['svcuser'] > "" and util.get_platform() == "Linux":
        print(style_start + "     svcuser: " + style_end + p_comp_dict['svcuser'])

    if 'status' in p_comp_dict and 'up_time' in p_comp_dict:
        # both known: show status and uptime on one line
        print(style_start + "      status: " + style_end + p_comp_dict['status'] + \
              style_start + " for " + style_end + p_comp_dict['up_time'])
    else:
        if 'status' in p_comp_dict:
            print(style_start + "      status: " + style_end + p_comp_dict['status'])
        if 'up_time' in p_comp_dict:
            print(style_start + "    up since: " + style_end + p_comp_dict['up_time'])

    if 'data_size' in p_comp_dict:
        print(style_start + "   data size: " + style_end + p_comp_dict['data_size'])

    if 'connections' in p_comp_dict:
        print(style_start + " connections: " + style_end + p_comp_dict['connections'])

    print(style_start + "Release Date: " + style_end + p_comp_dict['release_date'] + \
          style_start + "  Stage: " + style_end + p_comp_dict['stage'])

    if p_comp_dict['platform'] > "":
      print(style_start + "Supported On: " + style_end + "[" + p_comp_dict['platform'] + "]")

    if p_comp_dict['pre_reqs'] > "":
      print(style_start + "   Pre Req's: " + style_end + p_comp_dict['pre_reqs'])

    print(style_start +   "     License: " + style_end + p_comp_dict['license'])

    # is_installed arrives as 0/1 (or a string); render 0 as "NO"
    is_installed = str(p_comp_dict['is_installed'])
    if is_installed == "0":
        is_installed = "NO"

    print(style_start +   "   IsCurrent: " + style_end + str(p_comp_dict['is_current']) + \
          style_start +   "  IsInstalled: " + style_end + is_installed)

    if p_comp_dict['relnotes']:
        print (style_start + " Release Notes : " + style_end )
        print (p_comp_dict['relnotes'] )
Пример #22
0
if args.logdir > '':
    # User supplied an explicit log directory -- record it as-is.
    util.set_column("logdir", pgver, args.logdir)
else:
    ## DATA ###############################################
    # Default layout: MY_HOME/data/logs/<pgver>; create each level as needed.
    data_root = os.path.join(MY_HOME, "data")
    if not os.path.isdir(data_root):
        os.mkdir(data_root)

    ## LOGS ###############################################
    data_root_logs = os.path.join(data_root, "logs")
    if not os.path.isdir(data_root_logs):
        os.mkdir(data_root_logs)
    pg_log = os.path.join(data_root_logs, pgver)
    if not os.path.isdir(pg_log):
        os.mkdir(pg_log)
    if util.get_platform() == "Windows":
        # Grant the current user full control of the log dir, inherited by
        # children ((OI)(CI)F), since the service may run as another account.
        print("Giving current user permission to log dir")
        cur_user = getpass.getuser()
        batcmd = 'icacls "' + pg_log + '" /grant "' + cur_user + \
                 '":(OI)(CI)F'
        err = os.system(batcmd)
        if err:
            msg = "ERROR: Unable to set permissions on log dir " + \
                  " (err=" + str(err) + ")"
            util.fatal_error(msg)
    util.set_column("logdir", pgver, pg_log)

if args.svcname > '':
    # Optional override of the service name.
    util.set_column("svcname", pgver, args.svcname)

## AUTOSTART ###########################################
Пример #23
0

isJson = os.getenv("isJson", None)
msg = pgver + " starting on port " + str(port)
if isJson:
  jsonMsg = {}
  jsonMsg['status'] = "wip"
  jsonMsg['component'] = pgver
  jsonMsg['msg'] = msg
  print(json.dumps([jsonMsg]))
else:
  print(msg)

cmd = sys.executable + " " + homedir + os.sep  + "run-pgctl.py"

if util.get_platform() == "Windows":  
  if autostart == "on":
    command = 'sc start "'+ svcname + '"'
    print(' ' + command)
    util.system(command, is_admin=True)
  else:
    if isJson:
      isShell = False
      startCmd = cmd.split()
    else:
      isShell = True
      startCmd = "START /MIN " + cmd
    subprocess.Popen(startCmd, creationflags=subprocess.CREATE_NEW_PROCESS_GROUP, close_fds=True, shell=isShell)
elif util.get_platform() == "Darwin" and autostart == "on":
  if not os.path.isdir(logdir):
    os.mkdir(logdir)
Пример #24
0
#!/usr/bin/env python

import os
import sys
import argparse
import json
from util import get_platform

# Directory containing this script; the default config file lives next to it.
root_path = os.path.dirname(os.path.realpath(__file__))
default_config = os.path.join(root_path, "config.json")
# Platform key used to select the per-OS section of the config.
platform_str = get_platform()


def read_json(json_path):
    """Parse *json_path* as JSON; return {} when it is not an existing file."""
    if not os.path.isfile(json_path):
        return {}
    with open(json_path, "r") as fh:
        return json.load(fh)


def get_configs(config_json):
    """Return author/email plus the current platform's section of the config.

    Raises KeyError if 'author', 'email', or the platform key is missing.
    """
    data = read_json(config_json)
    merged = {}
    merged["author"] = data["author"]
    merged["email"] = data["email"]
    # overlay the per-OS settings for the detected platform
    merged.update(data[platform_str])
    return merged

Пример #25
0
# FILE:     run_sol.py
# ROLE:     TODO (some explanation)
# CREATED:  2015-02-08 00:41:09
# MODIFIED: 2015-02-08 20:47:38
#This script is to run experiment automatically to test the performance of the algorithm

import sys
import os

import util

import run_mRMR
import run_bif

# Path to the SOL executable, resolved per platform.
sol_exe = (r'..\install\bin\SOL.exe'
           if util.get_platform() == 'Windows'
           else '../install/bin/SOL')

def get_cmd_prefix(dt, model, config):
    dt_cmd = dt.get_train_cmd(config['rand_num'],config['cache'])
    if dt.class_num > 2:
        if model == 'CW_TG' or model == 'CW_RDA':
            loss_cmd = ' -cn %d -loss MaxScoreSquaredHinge ' %(dt.class_num)
        else:
            loss_cmd = ' -cn {0} -loss {1} '.format(dt.class_num, config['mc_loss'])
    else:
        if model == 'CW_TG' or model == 'CW_RDA':
            loss_cmd = ' -cn 2 -loss SquaredHinge '
        else:
Пример #26
0
# Ensure the data root directory exists.
if not os.path.isdir(data_root):
    os.mkdir(data_root)

# Data directory: default under the data root unless overridden by --datadir.
if args.datadir == "":
    pg_data = os.path.join(data_root, pgver)
else:
    pg_data = args.datadir

if not os.path.isdir(pg_data):
    os.mkdir(pg_data)

## SVCUSER ###########################################
# Resolve the OS user the service should run as (Linux only).
svcuser = ""
curr_user = ""

if util.get_platform() == "Linux":
    svcuser = args.svcuser
    if util.is_admin():
        if svcuser == "":
            # NOTE(review): '"******"' appears to be redacted text (likely a
            # default service account such as "postgres") -- recover the real
            # value from version control before relying on this branch.
            svcuser = "******"
    else:
        # Non-root: --svcuser is not allowed; run as the invoking user.
        if svcuser > "":
            fatal_error("ERROR: --svcuser cannot be specified if not root")
        svcuser = util.get_user()
        curr_user = svcuser

## PASSWD #############################################
# Password file defaults to ~/.pgpass, overridable with --pwfile.
is_password = False
pgpass_file = pg_home + os.sep + ".pgpass"
if args.pwfile:
    pgpass_file = args.pwfile
Пример #27
0
from __future__ import print_function, division

####################################################################
######          Copyright (c)  2015-2019 BigSQL           ##########
####################################################################

# Unregister the PostgreSQL component's autostart service, if any.

import os, sys
import util, startup

pgver = "pg9X"

# Nothing to remove unless the component was registered for autostart.
autostart = util.get_column('autostart', pgver)
if autostart != "on":
  sys.exit(0)

# e.g. "pg96" -> "9.6"; used to build the default Windows service name.
dotver = pgver[2] + "." + pgver[3]
APG_HOME = os.getenv('APG_HOME', '')
svcname   = util.get_column('svcname', pgver, 'PostgreSQL ' + dotver + ' Server')

if util.get_platform() == "Windows":
  # Delete the Windows service via sc.exe (requires elevation).
  sc_path = os.getenv("SYSTEMROOT", "") + os.sep + "System32" + os.sep + "sc"
  command = sc_path + ' delete "' + svcname + '"'
  util.system(command, is_admin=True)
elif util.get_platform() == "Linux":
  startup.remove_linux("postgresql" + pgver[2:4], "85", "15")
Пример #28
0
def run(dataset, model_config, output_file):
    """Train and evaluate liblinear (-s 5) over the dataset's list of C values.

    Per-C results are cached in result files and skipped on re-runs; all
    results are aggregated, saved to *output_file*, and returned as a
    util.ResultItem.
    """
    # bundled liblinear executables; only the Windows binaries are wired up
    if util.get_platform() == 'Windows':
        exe_dir = '../extern/liblinear/windows/'
        train_exe = exe_dir + 'train.exe'
        test_exe = exe_dir + 'predict.exe'
    else:
        exe_dir = '../extern/liblinear/windows/'
        raise Exception('path to liblinear in linux is not set yet!')

    dst_folder = dataset.name + '/liblinear'
    if not os.path.exists(dst_folder):
        os.makedirs(dst_folder)

    result = util.ResultItem()

    for c in dataset.c_list:
        model_file = dst_folder + '/model_%g.txt' % (c)
        predict_file = dst_folder + '/predict_%g.txt' % (c)
        result_file = dst_folder + '/result_%g.txt' % (c)
        test_file = dst_folder + '/test_%g.txt' % (c)

        result_once = util.ResultItem()
        if os.path.exists(result_file):
            # cached: reuse the previously saved result for this C
            result_once.load_result(result_file)
        else:
            # truncate the per-run test output file
            open(test_file, 'w').close()

            # training
            train_cmd = train_exe + ' -s 5 -c %f' % c + ' %s' % dataset.get_train_file(model_config['rand_num']) + ' %s' % model_file
            train_cmd = train_cmd.replace('/', os.sep)
            print(train_cmd)
            start_time = time.time()
            os.system(train_cmd)
            end_time = time.time()

            # hard to evaluate train_error, set to zero
            result_once.append_value('train_error', 0)

            # wall-clock training time
            result_once.append_value('train_time', float(end_time - start_time))

            # prediction; accuracy output is appended to test_file
            test_cmd = test_exe + ' %s' % dataset.test_file + ' %s' % model_file + ' %s' % predict_file + '>> %s' % test_file
            test_cmd = test_cmd.replace('/', os.sep)
            print(test_cmd)
            start_time = time.time()
            os.system(test_cmd)
            end_time = time.time()
            result_once.append_value('test_time', float(end_time - start_time))

            result_once.append_value('test_error', parse_test_error_rate(test_file))

            # model sparsity statistics
            model_size = get_model_size(model_file)
            result_once.append_value('non_zero_num', model_size)
            result_once.append_value('sparse_rate', 100.0 - model_size * 100.0 / dataset.dim)

            result_once.save_result(result_file)

        result.Append(result_once)

        print('\nTraining Result: ')
        result_once.Display()
        print('\n')

    result.save_result(output_file)
    return result