Code Example #1
File: benchmark_cycle.py Project: Altoros/YCSB
def _get_db_log_path(conf):
    # collect full paths of the DB log files: use the configured file
    # list if present, otherwise list everything in the logs directory
    logs = []
    if conf.db_logs_dir:
        log_file_path = lambda f: path(conf.db_logs_dir) + f
        collect_logs = conf.db_logs_files if conf.db_logs_files else ls(conf.db_logs_dir)
        for log_file in collect_logs:
            logs.append(log_file_path(log_file))
    return logs
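
The snippets on this page all rely on project-local helpers such as ls and path. For orientation only, here is a minimal sketch of what such wrappers might look like, assuming they delegate to the standard library (an assumption; each project's actual util module may differ):

import os

def ls(dir_path):
    # assumed behavior: list the entries of a directory
    return os.listdir(dir_path)

def path(dir_path):
    # assumed behavior: normalize a directory path so that string
    # concatenation with a filename yields a valid file path
    return dir_path if dir_path.endswith(os.sep) else dir_path + os.sep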
Code Example #2
File: score.py Project: nkhuyu/automl-phase-2
 if os.path.isfile(firstpost_name):
     # Record time of firstpost file creation
     start_time = os.path.getmtime(firstpost_name)
     # Makedir if necessary
     util.mkdir(os.path.join(score_dir, dataset_name, algo_name))
     # Load info about this dataset
     info_file = os.path.join(data_dir, dataset_name, '%s_public.info' % dataset_name)
     info = libscores.get_info(info_file)
     # FIXME HACK
     info['metric'] = 'auc_metric'
     # END FIXME HACK
     # Load solution for this dataset
     solution_file = os.path.join(data_dir, dataset_name, '%s_%s.solution' % (dataset_name, TEST))
     solution = libscores.read_array(solution_file)
     # For each set of predictions
     prediction_files = util.ls(os.path.join(pred_dir, '%s_%s_*.predict' % (dataset_name, TEST)))
     for prediction_file in prediction_files:
         # Time of file creation since algorithm start
         file_time = os.path.getmtime(prediction_file) - start_time
         # Open predictions
         prediction = libscores.read_array(prediction_file)
         # Check predictions match shape of solution
         if solution.shape != prediction.shape:
             raise ValueError("Mismatched prediction shape {} vs. {}".format(prediction.shape, solution.shape))
         # Score
         if info['metric'] == 'r2_metric' or info['metric'] == 'a_metric':
             # Remove NaN and Inf for regression
             solution = libscores.sanitize_array(solution)
             prediction = libscores.sanitize_array(prediction)
             # TODO - remove eval
             score = eval(info['metric'] + '(solution, prediction, "' + info['task'] + '")')
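
The TODO above flags the eval call. A minimal sketch of an explicit dispatch that would remove it, assuming libscores exposes the metric functions under the names stored in info['metric'] (an assumption; the real module layout may differ):

# look up the metric function by name instead of building code with eval
metric_fn = getattr(libscores, info['metric'])
score = metric_fn(solution, prediction, info['task'])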
Code Example #3
File: test_scaleout.py Project: ducky-hong/nbase-arc
    def test_delete_smrlog_after_scaleout(self):
        util.print_frame()

        # start load generator
        util.log("start load_generator")
        for i in range(self.max_load_generator):
            ip, port = util.get_rand_gateway(self.cluster)
            self.load_gen_thrd_list[i] = load_generator.LoadGenerator(i, ip, port)
            self.load_gen_thrd_list[i].start()

        time.sleep(5) # generate load for 5 sec
        util.log("started load_generator")

        # servers for scale out
        servers = [config.server4, config.server5, config.server6]
        leader_cm = self.cluster['servers'][0]

        # Scale out
        cluster = config.clusters[0]
        ret = util.pg_add(cluster, servers, leader_cm)
        self.assertEqual(True, ret, 'Scale out fail. util.pg_add returns false')

        time.sleep(5)
        # pg0 -> pg1
        cluster = config.clusters[1]
        ret = util.migration(cluster, 0, 1, 8000, 8191, 40000)
        self.assertEqual(True, ret, 'Migration Fail 0 -> 1')

        # get log file
        old_logs = {}
        for s in config.clusters[0]['servers']:
            parent_dir, log_dir = util.smr_log_dir(s['id'])
            path = '%s/%s' % (parent_dir, log_dir)
            old_logs[s['id']] = util.ls(path)

        # trigger bgsave so that old smrlogs can be deleted
        for s in config.clusters[0]['servers']:
            bgsave_ret = util.bgsave(s)
            self.assertTrue(bgsave_ret, 'failed to bgsave. pgs%d' % s['id'])
            util.log('bgsave pgs%d is done.' % s['id'])

        # check consistency
        for j in range(len(self.load_gen_thrd_list)):
            self.assertTrue(self.load_gen_thrd_list[j].isConsistent(),
                    'Inconsistent after migration')

        # does smr-replicator delete smrlogs?
        i = 0
        while i < 20:
            i += 1
            # get current log files
            cur_logs = {}
            for s in config.clusters[0]['servers']:
                parent_dir, log_dir = util.smr_log_dir(s['id'])
                path = '%s/%s' % (parent_dir, log_dir)
                cur_logs[s['id']] = util.ls(path)

            # compare old and new; what remains in temp_old_logs below is the set of deleted log files
            temp_old_logs = copy.deepcopy(old_logs)
            for id, nl in cur_logs.items():
                ol = temp_old_logs.get(id)
                self.assertNotEqual(ol, None, "failed to check logfiles. old logs for smr-replicator '%d' do not exist." % id)

                for log in nl:
                    if log in ol:
                        ol.remove(log)

            # ok stays True only if every replicator deleted at least one old log file
            ok = True
            for id, ol in temp_old_logs.items():
                if len(ol) == 0:
                    ok = False

            util.log('Loop %d ---------------------------------------------------------' % i)
            util.log('deleted smrlog files: %s' % util.json_to_str(temp_old_logs))

            if ok:
                break

            time.sleep(10)

        self.assertTrue(ok, 'smr-replicator does not delete smrlogs.')
        util.log('smr-replicator deletes smrlogs.')

        # check consistency of load_generator
        for i in range(len(self.load_gen_thrd_list)):
            self.load_gen_thrd_list[i].quit()
        for i in range(len(self.load_gen_thrd_list)):
            self.load_gen_thrd_list[i].join()
            self.assertTrue(self.load_gen_thrd_list[i].isConsistent(), 'Inconsistent after migration')
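
The while loop above is a poll-until-timeout pattern: retry a check a fixed number of times, sleeping between tries. A generic sketch of the same idea (a hypothetical helper, not part of nbase-arc):

import time

def poll_until(condition, attempts=20, interval=10):
    # retry the predicate up to `attempts` times, sleeping `interval`
    # seconds between tries; return True as soon as it succeeds
    for _ in range(attempts):
        if condition():
            return True
        time.sleep(interval)
    return False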
Code Example #4
File: main.py Project: merisbahti/password-generator
#!/usr/bin/python
from util import rand, ls, load, promptint, promptstr
print("XKCD Password Generator.")
dicts = ls("/usr/share/dict")

print("{:d} files found in /usr/share/dict".format(len(dicts)))
print("Choose which one to use 0-{:d}".format(len(dicts)-1))
for i in range(len(dicts)):
    print(str(i) + ": " + dicts[i].split("/")[-1])

d = load("/usr/share/dict/"+dicts[promptint("Choice (0-{:d}): ".format(len(dicts)-1))],
         promptint("Minimum word length: "),
         promptint("Maximum word length: "))

pw = ""
for i in range(promptint("Number of words: ")):
    pw = pw + d[rand(len(d))][:-1] + " "

print(pw)
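
If rand wraps a non-cryptographic RNG (an assumption; its implementation is not shown here), password generation is usually better served by the standard library's secrets module. A minimal sketch using the word list d loaded above:

import secrets

num_words = 4  # or promptint("Number of words: ") as in the original
# pick words with a cryptographically secure RNG; rstrip removes the
# trailing newline that the original slice [:-1] strips
pw = " ".join(secrets.choice(d).rstrip("\n") for _ in range(num_words))
print(pw)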

Code Example #5
import util

mods = util.ls("./mods-enabled")

# import each enabled mod by module name
for mod in mods:
    __import__(mod)
    print "imported mod %s" % mod
Code Example #6
 def _build_video_paths(self, video_dir):
     # list the directory and keep the full path to each video file
     video_paths = ls(video_dir)
     self.video_paths = [os.path.join(video_dir, f) for f in video_paths]
Code Example #7
File: validation.py Project: YanLiqi/charlatan
 def _count_images(self):
     # assumes three files per image on disk, hence the integer division
     filenames = ls(self.image_dir)
     return len(filenames) // 3