Example #1
def configure_name(self, _):
    config.run()
    # '.name' stores a comma-separated list of names seen so far
    f = open('.name', 'r+')
    names = f.read()
    rumps.alert("We think your name is one of " + names)
    # Let the user correct the guess in a small input window
    window = rumps.Window(title="if that's incorrect, correct it here",
                          default_text=names.split(',')[0],
                          dimensions=(100, 20))
    name = window.run().text
    # Append the corrected name; read() left the file position at EOF
    f.write("," + name)
    f.close()
Example #2
def run(self, **kwargs):
  # Set up every suite and collect the commands it wants executed
  command_list = []
  for suite in self.suites:
    suite.setup(verbose=self.args.verbose, **kwargs)
    command_list += suite.get_command_list()
  # Execute the collected commands sequentially or in parallel
  if self.args.runmode == "sequential":
    output_list = config.run(command_list, verbose=self.args.verbose)
  else:
    output_list = config.run_parallel(command_list, verbose=self.args.verbose)
  # Hand each suite the slice of outputs belonging to its own commands
  i = 0
  for suite in self.suites:
    suite.parse_output_list(output_list[i:i + len(suite.get_command_list())])
    i += len(suite.get_command_list())
  # Render one table per suite, optionally mirroring it to the output file
  if self.args.output != '':
    o = open(self.args.output, "w")
  for suite in self.suites:
    tablecopy = copy.deepcopy(self.table)
    tablecopy.set_cols_align(suite.get_align())
    tablecopy.set_cols_dtype(suite.get_dtype())
    if self.args.format == "term":
      if suite.get_cols_width(80):
        tablecopy.set_cols_width(suite.get_cols_width(80))
    tablecopy.add_rows([suite.get_header()] + suite.get_rows())
    if self.args.output != '':
      o.write(tablecopy.draw())
      o.write("\n")
      o.write("\n")
    print(tablecopy.draw())
  if self.args.output != '':
    o.close()
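The snippets on this page come from different projects, so config.run is not one single function (it takes no arguments in some examples, a config path in another, and a command list here). For the command-list style used above, the following is a minimal sketch of what a run/run_parallel pair could look like; the subprocess-based implementation, the return value (one captured stdout string per command), and every name in it are assumptions for illustration, not code from any of these projects.

# Hypothetical sketch of a config module matching the calling convention above.
import subprocess
from concurrent.futures import ThreadPoolExecutor


def _run_one(command, verbose=False):
    if verbose:
        print(command)
    completed = subprocess.run(command, shell=True,
                               capture_output=True, text=True)
    return completed.stdout


def run(command_list, verbose=False):
    # Sequential execution; outputs keep the order of command_list.
    return [_run_one(command, verbose) for command in command_list]


def run_parallel(command_list, verbose=False):
    # Same contract, but the commands run concurrently in a thread pool.
    with ThreadPoolExecutor() as pool:
        return list(pool.map(lambda c: _run_one(c, verbose), command_list))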
Example #3
def postcommithook(environ):
    payload = parse_qs(environ['wsgi.input'].read())['payload'][0]
    meta = loads(payload)
    for commit in meta['commits']:
        print('Running build/test for commit %s' % commit['id'])
        err = config.run()
        if err is None:
            send_mail("Successfully built/tested %s at revision %s" %
                      (config.PROJECT_NAME, commit['id']))
        else:
            send_mail("Building/testing %s at revision %s failed:\n\n%s" %
                      (config.PROJECT_NAME, commit['id'], err))
        print('done')
    return 'yo build done'
Example #4
# Build the test-data generator, then create one data file (N=4095) in
# tests/checks/data for every (data_type, fill_type) combination
test_data = terminal.make("tests/common/test_data")
for (data_type, fill_type) in itertools.product(data_types, fill_types):
    terminal.call("cd {}; {} {}".format(
        os.path.join(terminal.top, "tests/checks/data"), test_data,
        terminal.flags(["N", "d", "f"], [4095, data_type, fill_type])))

# Double-precision (d) validation checks, run against every double-precision data file
d_checks = [checks.ValidateExternalRDSUMTest,
            checks.ValidateExternalRDASUMTest,
            checks.ValidateExternalRDNRM2Test,
            checks.ValidateExternalRDDOTTest]
for data in d_data:
    for check in d_checks:
        check = check()
        check.setup(flags=terminal.flags(["i", "r"], [data, ""]))
        config.run(check.get_command_list())

# Double-complex (z) validation checks
z_checks = [checks.ValidateExternalRZSUMTest,
            checks.ValidateExternalRDZASUMTest,
            checks.ValidateExternalRDZNRM2Test,
            checks.ValidateExternalRZDOTUTest,
            checks.ValidateExternalRZDOTCTest]

for data in z_data:
    for check in z_checks:
        check = check()
        check.setup(flags=terminal.flags(["i", "r"], [data, ""]))
        config.run(check.get_command_list())

s_checks = [checks.ValidateExternalRSSUMTest,\
            checks.ValidateExternalRSASUMTest,\
Example #5
import fitness
import config
import ksp
import os

if __name__ == '__main__':
    ksp.Ksp.game = ksp.Ksp()
    local_dir = os.path.dirname(__file__)
    config_path = os.path.join(local_dir, 'NeatConfig.cfg')
    config.run(config_path)
Example #6
                    doc_ref_date = db.collection(u'data').document(
                        username).collection(today)
                    doc_ref_date.add({
                        u'Date': datetime,
                        u'Noodle': noodle_today
                    })

                except Exception as e:
                    print(e)
            else:
                text = f'WANNA BE FAT ISIT? STOP EATING!!!'
                update.message.reply_text(text)

        elif (command == "check"):
            number_left = total_noodles - number_of_noodles
            print('number left ', number_left)
            text = f'You have eaten {number_of_noodles} packets of noodle(s), {number_left} left for the month.'
            update.message.reply_text(text)

        else:
            update.message.reply_text(
                'Please only enter - check/oops commands')
    except:
        update.message.reply_text(
            'STH WRONG Please only enter - check/oops commands')


if __name__ == "__main__":
    start_bot()
    run(updater)
Example #7
            if values[-1] == '':
                last_value = values[0]
                win = True
                for value in values[1:-1]:
                    if value < last_value:
                        win = False
                        break
                    last_value = value
                game_state['win'] = win
                if win and DB_ACTIVE:
                    result = Results(move_count=game_state['move_count'])
                    db.session.add(result)
                    db.session.commit()
    game_state = json.dumps(game_state)
    if moved:
        redis_client.set('game_state', game_state)
    return game_state


@app.route('/api/last_results/<int(min=1):number>')
def last_results(number):
    results = []
    if DB_ACTIVE:
        for r in Results.query.order_by(Results.id.desc()).limit(number).all():
            results.append(r.move_count)
    return json.dumps(results)


if __name__ == '__main__':
    run()
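As a usage note for the last_results route above, a client only needs a plain HTTP GET; the host, port, and the limit of 5 below are made-up values for illustration.

# Hypothetical client call for the route defined above (host/port assumed).
import requests

response = requests.get("http://localhost:5000/api/last_results/5")
print(response.json())  # a JSON list of move counts, most recent first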
Example #8
def run(apis):
  #setup
  args = argparser.parse_args()
  parameter_space = generate.deserialize_parameter_space(args.params)
  arguments_file_name = args.args
  if os.path.isfile(arguments_file_name):
    arguments = generate.deserialize_arguments(arguments_file_name)
  else:
    arguments = parameter_space.get_default_arguments()

  for i in range(args.trials):
    print("{}/{}".format(i, args.trials))

    #create a new argument file
    test_arguments = copy.deepcopy(arguments)
    apis_to_run = []
    desired_results = []
    for api in apis:
      desired_result = api.get_next_desired_result()
      if not desired_result:
        continue
      desired_results.append(desired_result)
      for (parameter, argument) in desired_result.configuration.data.items():
        parameter = parameter_space.parameters[parameter]
        if type(parameter) == generate.IntegerParameter:
          test_arguments[parameter.name] = argument * parameter.step
        if type(parameter) == generate.BooleanParameter:
          test_arguments[parameter.name] = argument
        if type(parameter) == generate.PowerOfTwoParameter:
          test_arguments[parameter.name] = argument
      apis_to_run.append(api)

    if apis_to_run:
      #build with these arguments
      generate.serialize_arguments(test_arguments, arguments_file_name)
      terminal.make_clean("./")

      bench_tests = []
      command_list = []
      for api in apis_to_run:
        bench_test = benchs.all_benchs[api.measurement_interface.benchmark]
        bench_tests.append(bench_test[0]())
        bench_tests[-1].setup(flagss = bench_test[1], attribute="%peak", args=arguments_file_name, remake=True, verbose=args.verbose)
        command_list += bench_tests[-1].get_command_list()

      #run with these arguments
      output_list = config.run(command_list, verbose=args.verbose)

      #return the results to the apis
      for api, desired_result, bench_test in zip(apis_to_run, desired_results, bench_tests):
        bench_test.parse_output_list(output_list[:len(bench_test.get_command_list())])
        output_list = output_list[len(bench_test.get_command_list()):]
        result = Result(time=(100.0/bench_test.get_result()))
        api.report_result(desired_result, result)

  #parse the best configurations
  best_arguments = copy.deepcopy(arguments)
  for api in apis:
    api.search_driver.process_new_results()
    for (parameter, argument) in api.get_best_configuration().items():
      parameter = parameter_space.parameters[parameter]
      if type(parameter) == generate.IntegerParameter:
        best_arguments[parameter.name] = argument * parameter.step
      if type(parameter) == generate.BooleanParameter:
        best_arguments[parameter.name] = argument
      if type(parameter) == generate.PowerOfTwoParameter:
        best_arguments[parameter.name] = argument
    api.finish()
  generate.serialize_arguments(best_arguments, arguments_file_name)
Example #9
def run(apis):
    #setup
    args = argparser.parse_args()
    parameter_space = generate.deserialize_parameter_space(args.params)
    arguments_file_name = args.args
    if os.path.isfile(arguments_file_name):
        arguments = generate.deserialize_arguments(arguments_file_name)
    else:
        arguments = parameter_space.get_default_arguments()

    for i in range(args.trials):
        print("{}/{}".format(i, args.trials))

        #create a new argument file
        test_arguments = copy.deepcopy(arguments)
        apis_to_run = []
        desired_results = []
        for api in apis:
            desired_result = api.get_next_desired_result()
            if not desired_result:
                continue
            desired_results.append(desired_result)
            for (parameter,
                 argument) in desired_result.configuration.data.items():
                parameter = parameter_space.parameters[parameter]
                if type(parameter) == generate.IntegerParameter:
                    test_arguments[parameter.name] = argument * parameter.step
                if type(parameter) == generate.BooleanParameter:
                    test_arguments[parameter.name] = argument
                if type(parameter) == generate.PowerOfTwoParameter:
                    test_arguments[parameter.name] = argument
            apis_to_run.append(api)

        if apis_to_run:
            #build with these arguments
            generate.serialize_arguments(test_arguments, arguments_file_name)
            terminal.make_clean("./")

            bench_tests = []
            command_list = []
            for api in apis_to_run:
                bench_test = benchs.all_benchs[
                    api.measurement_interface.benchmark]
                bench_tests.append(bench_test[0]())
                bench_tests[-1].setup(flagss=bench_test[1],
                                      attribute="%peak",
                                      args=arguments_file_name,
                                      remake=True,
                                      verbose=args.verbose)
                command_list += bench_tests[-1].get_command_list()

            #run with these arguments
            output_list = config.run(command_list, verbose=args.verbose)

            #return the results to the apis
            for api, desired_result, bench_test in zip(apis_to_run,
                                                       desired_results,
                                                       bench_tests):
                bench_test.parse_output_list(
                    output_list[:len(bench_test.get_command_list())])
                output_list = output_list[len(bench_test.get_command_list()):]
                result = Result(time=(100.0 / bench_test.get_result()))
                api.report_result(desired_result, result)

    #parse the best configurations
    best_arguments = copy.deepcopy(arguments)
    for api in apis:
        api.search_driver.process_new_results()
        for (parameter, argument) in api.get_best_configuration().items():
            parameter = parameter_space.parameters[parameter]
            if type(parameter) == generate.IntegerParameter:
                best_arguments[parameter.name] = argument * parameter.step
            if type(parameter) == generate.BooleanParameter:
                best_arguments[parameter.name] = argument
            if type(parameter) == generate.PowerOfTwoParameter:
                best_arguments[parameter.name] = argument
        api.finish()
    generate.serialize_arguments(best_arguments, arguments_file_name)
Example #10
c_data = ["data/c_2_rand-1_N4095.dat", "data/c_normal_N4095.dat",
          "data/c_rand+(rand-1)_N4095.dat", "data/c_rand_N4095.dat",
          "data/c_rand_cond_N4095.dat", "data/c_sine_N4095.dat",
          "data/c_small+grow_big_N4095.dat", "data/c_small+rand_big_N4095.dat"]


test_data = terminal.make("tests/common/test_data")
for (data_type, fill_type) in itertools.product(data_types, fill_types):
  terminal.call("cd {}; {} {}".format(
      os.path.join(terminal.top, "tests/checks/data"), test_data,
      terminal.flags(["N", "d", "f"], [4095, data_type, fill_type])))

d_checks = [checks.ValidateExternalRDSUMTest,
            checks.ValidateExternalRDASUMTest,
            checks.ValidateExternalRDNRM2Test,
            checks.ValidateExternalRDDOTTest]
for data in d_data:
  for check in d_checks:
    check = check()
    check.setup(flags=terminal.flags(["i", "r"], [data, ""]))
    config.run(check.get_command_list())

z_checks = [checks.ValidateExternalRZSUMTest,
            checks.ValidateExternalRDZASUMTest,
            checks.ValidateExternalRDZNRM2Test,
            checks.ValidateExternalRZDOTUTest,
            checks.ValidateExternalRZDOTCTest]

for data in z_data:
  for check in z_checks:
    check = check()
    check.setup(flags=terminal.flags(["i", "r"], [data, ""]))
    config.run(check.get_command_list())

s_checks = [checks.ValidateExternalRSSUMTest,\
            checks.ValidateExternalRSASUMTest,\