def bench(start_env):
    def print_generator():
        ESTIMATED_TIME_PER_VAR = 60 * 60 * 2.25  # In seconds
        MIN_CR = 1000  # Minimum number of communication rounds
        C = [0.1]
        E = [1, 5, 10]

        combos = list(itertools.product(C, E))
        combos.append((0.2, 1))

        for (C, E) in combos:
            timestamp = fed_avg.print_assignment(
                dataset='MNIST-non-iid',
                variant='1',
                chosen_client_proportion=C,
                run_time=ESTIMATED_TIME_PER_VAR,
                communication_rounds=MIN_CR,
                epochs=E,
                batch_size=20,
                learning_rate=0.11,
                lr_decay=2.2e-7,
            )
            time.sleep(ESTIMATED_TIME_PER_VAR)  # The estimated time will not hold for the last iter
            yield timestamp
        print("Done!")

    # Run benchmark
    benchmark.run(print_generator(), start_env)
def bench(start_env):
    def print_generator():
        Bls = [32]
        Bus = [67]
        CRs = [1000]
        Times = [None]

        for (Bl, Bu, CR, Time) in zip(Bls, Bus, CRs, Times):
            if Time is None:
                stop_time = d.datetime(2018, 4, 23, 7, 57)
                Time = (stop_time - d.datetime.now()).total_seconds()
            timestamp = coop.print_assignment(
                dataset='MNIST-non-iid',
                run_time=Time,
                communication_rounds=CR,
                report_frequency=10,
                lower_age_limit=Bl,
                upper_age_limit=Bu,
                epochs=1,
                batch_size=20,
                learning_rate=0.05,
                lr_decay=1e-5,
            )
            time.sleep(Time)
            yield timestamp
        print("Done!")

    # Run benchmark
    benchmark.run(print_generator(), start_env)
def bench(start_server_locally):
    def print_generator():
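        # Random search over (learning rate, lr decay) pairs; the fixed seed
        # makes the sampled values reproducible between runs.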
        NUM_RANDOM_SAMPLES = 20
        SEED = 7
        np.random.seed(SEED)
        decays = [10**x for x in np.random.uniform(-8, -3, NUM_RANDOM_SAMPLES)]
        learning_rates = np.random.uniform(0.02, 0.15, NUM_RANDOM_SAMPLES)
        random_samples = list(zip(learning_rates, decays))

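        # Hand-picked indices into the random draw; only these samples are benchmarked.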
        idxs = [0, 2, 3, 5, 8, 9, 10, 11, 17, 18]

        BENCH_TIME_PER_VAR = 0
        MIN_CR = 10  # Minimum number of communication rounds

        for lr, decay in [random_samples[i] for i in idxs]:
            timestamp = fed_avg.print_assignment(
                dataset='MNIST-non-iid',
                variant='1',
                chosen_client_proportion=1.0,
                run_time=BENCH_TIME_PER_VAR,
                communication_rounds=MIN_CR,
                epochs=1,
                batch_size=10,
                learning_rate=lr,
                lr_decay=decay,
            )

            time.sleep(BENCH_TIME_PER_VAR)
            yield timestamp
        print("Death Blossom activated")
        os.system('./exit.escript')  # Kills "python3", but not "python"

    # Run benchmark
    benchmark.run(print_generator(), start_server_locally)
Example #4
def bench(start_env):

    def print_generator():
        BENCH_TIME_PER_VAR = 60 * 60 * 28  # In seconds
        MIN_CR = 1000  # Minimum number of communication rounds
        C50 = [0.5]
        E50 = [1, 5]
        C100 = [1.0]
        E100 = [1, 5, 10]

        combos = list(itertools.product(C50, E50))
        combos = combos + list(itertools.product(C100, E100))

        for (C, E) in combos:
            timestamp = fed_avg.print_assignment(
                dataset='MNIST-non-iid',
                variant='1',
                chosen_client_proportion=C,
                run_time=BENCH_TIME_PER_VAR,
                communication_rounds=MIN_CR,
                epochs=E,
                batch_size=20,
                learning_rate=0.05,
                lr_decay=1e-5,
            )
            time.sleep(BENCH_TIME_PER_VAR)
            yield timestamp
        print("Done!")

    # Run benchmark
    benchmark.run(print_generator(), start_env)
Example #5
def bench(start_env):

    def print_generator():
        ESTIMATED_TIME_PER_VAR = 0  # In seconds
        MIN_CR = 1000  # Minimum number of communication rounds
        Cs = [0.2]
        Es = [10, 5, 1]

        for (C, E) in itertools.product(Cs, Es):
            timestamp = fed_avg.print_assignment(
                dataset='MNIST-non-iid',
                variant='1',
                chosen_client_proportion=C,
                run_time=ESTIMATED_TIME_PER_VAR,
                communication_rounds=MIN_CR,
                epochs=E,
                batch_size=20,
                learning_rate=0.088,
                lr_decay=3.2e-6,
            )
            time.sleep(ESTIMATED_TIME_PER_VAR)
            yield timestamp
        print("Done!")

    # Run benchmark
    benchmark.run(print_generator(), start_env)
Example #6
def do_search(problem, configname, timeout, memory, debug=False):
    # TODO: Currently, do_search returns an error msg on error and
    #       None on no error, while do_translate/do_preprocess return
    #       True/False for success/no success. This should be unified.
    #       Maybe throw exceptions if something goes wrong? Also,
    #       maybe we should allow for warnings in addition to errors.
    #       The "skipped -- dir exists" stuff should maybe just be a
    #       warning.
    outdir = search_dir(problem, configname)
    if not debug:
        if os.path.exists(outdir):
            return "skipped [%s] %s -- dir exists" % (configname, problem)
        elif not os.path.exists(translate_dir(problem)):
            return "Translated files for %s not available." % problem
        elif not os.path.exists(preprocess_dir(problem)):
            return "Preprocessed files for %s not available." % problem
    if debug and not os.path.exists(translate_dir(problem)):
        # Do not abort if translation does not exist. Don't store search output.
        # (Instead, translate if necessary and always search.)
        print("Translating and Preprocessing...")
        success = do_translate(problem)
        if not success:
            return "Translating %s failed." % problem
        success = do_preprocess(problem)
        if not success:
            return "Preprocessing %s failed." % problem
    copy_files(TRANSLATE_OUTPUTS, ".", src_dir=translate_dir(problem))
    copy_files(PREPROCESS_OUTPUTS, ".", src_dir=preprocess_dir(problem))
    if debug:  # Write planner output to screen instead of file.
        planner = planner_debug_executable()
        success = benchmark.run(
            cmd=[planner] + planner_configurations.get_config(configname),
            timeout=timeout,
            memory=memory,
            status="status.log",
            stdin="output",
        )
        if success:
            delete_files(["sas_plan"])
            delete_files(["status.log"])
    else:
        planner = planner_executable()
        success = benchmark.run(
            cmd=[planner] + planner_configurations.get_config(configname),
            timeout=timeout,
            memory=memory,
            status="status.log",
            stdin="output",
            stdout="search.log",
            stderr="search.err",
        )
        if success:
            move_files(["sas_plan"], outdir)
        move_files(["search.log", "status.log"], outdir)
        move_optional_files(["search.err"], outdir)
    delete_files(PREPROCESS_OUTPUTS)
    delete_files(TRANSLATE_OUTPUTS)
    return None
Example #8
def do_translate(problem, generate_relaxed_problem=False):
    executable = translator_executable(relaxed=generate_relaxed_problem)
    benchmark.run(
        cmd=[executable, problem.domain_file(), problem.problem_file()],
        status="status.log",
        stdout="translate.log",
        stderr="translate.err",
        )
    outdir = translate_dir(problem)
    move_files(["translate.log", "status.log"], outdir)
    if move_optional_files(["translate.err"], outdir):
        # There was an error.
        return False
    else:
        move_files(TRANSLATE_OUTPUTS, outdir)
        return True
Example #9
 def execute(self):
     cmd_string = './pr2plan -d %s -i %s -o %s' % (
         self.domain, self.problem, self.obs_stream)
     self.log = benchmark.Log('%s_%s_%s_transcription.log' %
                              (self.domain, self.problem, self.obs_stream))
     self.signal, self.time = benchmark.run(cmd_string, self.max_time,
                                            self.max_mem, self.log)
	def execute( self ) :
		if not self.convert_to_integers :
			cmd_string = './pr2plan -d %s -i %s -o %s -P'%(self.domain, self.problem, self.obs_stream)
		else :
			cmd_string = './pr2plan -d %s -i %s -o %s -P -Z %s'%(self.domain, self.problem, self.obs_stream, self.factor)
		self.log = benchmark.Log( '%s_%s_%s_transcription.log'%(self.domain, self.problem, self.obs_stream) )
		self.signal, self.time = benchmark.run( cmd_string, self.max_time, self.max_mem, self.log )
def bench(start_env):
    def print_generator():
        N_LINEAR_SAMPLES = 8
        NUM_RANDOM_SAMPLES = 20
        N_SAMPLES = 4
        SEED = 7
        Bls = [32 - 16] * N_SAMPLES
        Bus = [67 - 16] * N_SAMPLES
        CRs = [5000] * N_SAMPLES

        np.random.seed(SEED)
        rand_decay = [
            10**x for x in np.random.uniform(-8, -3, NUM_RANDOM_SAMPLES)
        ]
        rand_lr = np.random.uniform(0.02, 0.15, NUM_RANDOM_SAMPLES)

        linear_lr = np.linspace(0.09, 0.02, N_LINEAR_SAMPLES)
        learning_rates = rand_lr  #np.concatenate((rand_lr, linear_lr))
        lr_decays = rand_decay  #+[0]*N_LINEAR_SAMPLES

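        # Keep only four of the twenty random samples, selected by index through a boolean mask.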
        indices = np.array([8, 9, 14, 15])
        mask = np.zeros(20, dtype=bool)
        mask[indices] = True
        lr_decays = np.array(lr_decays)[mask]
        learning_rates = learning_rates[mask]

        # (Bl, Bu, CR, lr, lr_decay)
        bench_collection = zip(Bls, Bus, CRs, learning_rates, lr_decays)
        for tup in bench_collection:
            (Bl, Bu, CR, lr, decay) = tup
            timestamp = coop.print_assignment(
                dataset='MNIST-non-iid',
                run_time=0,
                communication_rounds=CR,
                report_frequency=10,
                lower_age_limit=Bl,
                upper_age_limit=Bu,
                epochs=1,
                batch_size=20,
                learning_rate=lr,
                lr_decay=decay,
            )
            )
            yield timestamp
        print("Done!")

    # Run benchmark
    benchmark.run(print_generator(), start_env)
def run_instance(folder, command, agent, rom_path, i):
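    # Run one episode of `command`, logging to fulllog.<i+1>. The resulting
    # episode.1 file is stored as episode.<i+1> (or a crash note is written),
    # and the agent's search trace is collected unless the agent is 'random'.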
    res_filename = os.path.join(folder, 'episode.%d' % (i + 1))
    log_filename = os.path.join(folder, 'fulllog.%d' % (i + 1))

    log = benchmark.Log('%s' % (log_filename))
    benchmark.run(command, 0, 4096, log)
    if not os.path.exists('episode.1'):
        with open(res_filename, 'w') as output:
            print >> output, "Agent crashed"

    else:
        os.system('mv episode.1 %s' % res_filename)

    if agent == 'random': return
    trace_filename = 'episode.%d.trace' % (i + 1)
    trace_filename = os.path.join(folder, trace_filename)
    os.system('mv %(agent)s.search-agent.trace %(trace_filename)s' % locals())
	def execute( self ) :
		if self.upper_bound is None :
			ub_string = ''
		else :
			ub_string = '-ub %s'%self.upper_bound
		cmd_string = './hsp_f -strict -dba-semantics -rm -cost -rAH -use-lse -bfs %s -v 0 -ipc %s %s > %s.soln'%( ub_string, self.domain, self.problem, self.noext_problem)
		self.log = benchmark.Log( self.log_file )
		self.signal, self.time = benchmark.run( cmd_string, self.max_time, self.max_mem, self.log )
		self.gather_data()
Example #18
def run_task(benchmark, command, domain, instance, folder, timeout):
    import benchmark
    import os
    import json

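    # Run `command` inside `folder` with the given timeout and a 4096 MB memory
    # cap, record its exit code and wall time, merge any stats found in
    # search.json, and append everything to data/<domain>/<instance>.json.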
    current_dir = os.getcwd()
    os.chdir(folder)
    log = 'trace.log'
    rv, time = benchmark.run(command, timeout, 4096, benchmark.Log(log))
    command_info = {}
    command_info['exit_code'] = rv
    command_info['wall_time'] = time

    if rv == 0:
        json_search_data = 'search.json'
        if os.path.exists(json_search_data):
            with open(json_search_data) as input:
                try:
                    s = input.read()
                    data = json.loads(s)
                except ValueError as e:
                    print(e)
                    print("Read from file:")
                    print(s)
                    os.chdir(current_dir)
                    return
                for k, v in data.items():
                    command_info[k] = v
        else:
            print("No json data payload found!")

    os.chdir(current_dir)
    data_folder = 'data'
    data_folder = os.path.join(data_folder, domain)
    if not os.path.exists(data_folder):
        os.makedirs(data_folder)

    info_filename = os.path.join(data_folder, '{0}.json'.format(instance))
    if not os.path.exists(info_filename):
        info = {}
        info['domain'] = domain
        info['instance'] = instance
        info['planners_data'] = {}
    else:
        with open(info_filename) as instream:
            info = json.loads(instream.read())
    info['planners_data'][command] = command_info

    with open(info_filename, 'w') as outstream:
        payload = json.dumps(info,
                             separators=(',', ':'),
                             indent=4,
                             sort_keys=True)
        outstream.write(payload)

    return payload
Example #19
def do_preprocess(problem):
    copy_files(TRANSLATE_OUTPUTS, ".", src_dir=translate_dir(problem))
    executable = preprocessor_executable()
    benchmark.run(
        cmd=[executable],
        status="status.log",
        stdin="output.sas",
        stdout="preprocess.log",
        stderr="preprocess.err",
    )
    outdir = preprocess_dir(problem)
    move_files(["preprocess.log", "status.log"], outdir)
    delete_files(TRANSLATE_OUTPUTS)
    if move_optional_files(["preprocess.err"], outdir):
        # There was an error.
        return False
    else:
        move_files(PREPROCESS_OUTPUTS, outdir)
        return True
Example #21
 def execute(self):
     if LAMA.greedy:
         cmd_string = './plan-greedy %s %s %s' % (self.domain, self.problem,
                                                  self.result)
     else:
         cmd_string = './plan %s %s %s' % (self.domain, self.problem,
                                           self.result)
     self.log = benchmark.Log(self.log_file)
     self.signal, self.time = benchmark.run(cmd_string, self.max_time,
                                            self.max_mem, self.log)
     self.gather_data()
Example #22
 def execute(self):
     if self.upper_bound is None:
         ub_string = ''
     else:
         ub_string = '-ub %s' % self.upper_bound
     cmd_string = './hsp_f -strict -dba-semantics -rm -cost -rAH -use-lse -bfs %s -v 0 -ipc %s %s > %s.soln' % (
         ub_string, self.domain, self.problem, self.noext_problem)
     self.log = benchmark.Log('%s.log' % self.noext_problem)
     self.signal, self.time = benchmark.run(cmd_string, self.max_time,
                                            self.max_mem, self.log)
     self.gather_data()
Example #23
def bench(start_env):
    def print_generator():

        for fold_num in range(3, 5):
            timestamp = fed_avg.print_assignment(
                dataset='MNIST_noniid_cv' + str(fold_num),
                variant='1',
                chosen_client_proportion=0.1,
                run_time=0,
                communication_rounds=1000,
                epochs=5,
                batch_size=20,
                learning_rate=0.088,
                lr_decay=3.2e-6,
            )
            yield timestamp
        print("Done!")

    # Run benchmark
    benchmark.run(print_generator(), start_env)
Example #25
def main() :
#	commands = [ 
#			('./bfs --heuristic 1 --domain ~/Sandboxes/Fast-Downward/benchmarks/blocks/domain.pddl --problem %(instance)s', 'hadd-' ),
#			('./bfs --heuristic 2 --domain ~/Sandboxes/Fast-Downward/benchmarks/blocks/domain.pddl --problem %(instance)s', 'rp-hadd-' ),
#			('./bfs --heuristic 3 --domain ~/Sandboxes/Fast-Downward/benchmarks/blocks/domain.pddl --problem %(instance)s', 'rp-hmax-' ),
#			('./bfs --heuristic 4 --domain ~/Sandboxes/Fast-Downward/benchmarks/blocks/domain.pddl --problem %(instance)s', 'ff-rp-hadd-' ),
#			('./bfs --heuristic 5 --domain ~/Sandboxes/Fast-Downward/benchmarks/blocks/domain.pddl --problem %(instance)s', 'ff-rp-hmax-' ) ]

	commands = [ 
			('./bfs --heuristic 1 --domain ~/Sandboxes/Fast-Downward/benchmarks/blocks/domain.pddl --problem %(instance)s', 'hadd-' ),
			('./bfs --heuristic 2 --domain ~/Sandboxes/Fast-Downward/benchmarks/blocks/domain.pddl --problem %(instance)s', 'rp-hadd-' )
#			('./bfs --heuristic 4 --domain ~/Sandboxes/Fast-Downward/benchmarks/blocks/domain.pddl --problem %(instance)s', 'ff-rp-hadd-' ),
#			('./bfs --heuristic 5 --domain ~/Sandboxes/Fast-Downward/benchmarks/blocks/domain.pddl --problem %(instance)s', 'ff-rp-hmax-' ) 
			]



	files = glob.glob( '/home/bowman/Sandboxes/Fast-Downward/benchmarks/blocks/probBLOCKS-*.pddl' )
	files.sort()
	os.system( 'rm -rf *.res' )


	for command, prefix in commands :
		res_file = '%sresult.csv'%prefix
		if os.path.exists( res_file ) :
			os.system( 'rm -rf %s'%res_file )
		results = []
		for instance in files :
			output = prefix + os.path.split(instance)[-1].replace('pddl','res')
			instance_command = command%locals()
	
			log = benchmark.Log( output )
			signal, time = benchmark.run( instance_command, 1800, 2048, log )
			print >> sys.stdout, "%s, %s, %s"%(instance, signal, time)
			expanded = None
			cost = None
			with open( output ) as log_output :
				for line in log_output :
					if 'expanded during' in line :
						exp_tok = line.split(':')[-1]
						expanded = exp_tok.strip()
						continue
					if 'plan cost' in line :
						cost_tok = line.split( '=' )[-1]
						cost = cost_tok.strip()
						continue
			results.append( [ os.path.split(instance)[-1].replace('.pddl','').replace('-',' '), expanded, cost ] )
		results.sort()
		with open( res_file, 'w' ) as output :
			for instance, exp, cost in results :
				print >> output, "%s,%s,%s"%(instance, exp, cost )
Example #26
 def execute(self):
     if LAMA.greedy:
         #cmd_string = '/home/mk/Planning/fastdownwardplanner/fast-downward.py  --alias seq-sat-lama-2011 %s %s '%( self.domain, self.problem)
         cmd_string = '/home/mk/Planning/fastdownwardplanner/fast-downward.py  --alias lama-first %s %s ' % (
             self.domain, self.problem)
     else:
         #cmd_string = './plan %s %s %s'%( self.domain, self.problem, self.result)
         cmd_string = '/home/mk/Planning/fastdownwardplanner/fast-downward.py  --alias lama-first %s %s ' % (
             self.domain, self.problem
         )  #Michael. I could put the normal lama here?
     self.log = benchmark.Log(self.log_file)
     self.signal, self.time = benchmark.run(cmd_string, self.max_time,
                                            self.max_mem, self.log)
     self.gather_data()
def bench(start_server_locally):
    def print_generator():
        NUM_RANDOM_SAMPLES = 10
        SEED = 2018 - 3 - 28  # Intended as the date 2018-03-28; as written it evaluates to the integer 1987
        np.random.seed(SEED)
        decays = [10**x for x in np.random.uniform(-4, -3, NUM_RANDOM_SAMPLES)]
        learning_rates = np.random.uniform(0.05, 0.08, NUM_RANDOM_SAMPLES)
        random_samples = list(zip(learning_rates, decays))

        BENCH_TIME_PER_VAR = 0
        MIN_CR = 20  # Minimum number of communication rounds

        E = [1, 5, 10]
        B = [10, 20, 600]

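        # Each (epochs, batch_size) pair is repeated three times over the same
        # random (learning rate, decay) samples.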
        for epochs, batch_size in zip(E, B):
            for _ in range(3):
                for lr, decay in random_samples:
                    timestamp = fed_avg.print_assignment(
                        dataset='MNIST-non-iid',
                        variant='1',
                        chosen_client_proportion=1.0,
                        run_time=BENCH_TIME_PER_VAR,
                        communication_rounds=MIN_CR,
                        epochs=epochs,
                        batch_size=batch_size,
                        learning_rate=lr,
                        lr_decay=decay,
                    )

                    time.sleep(BENCH_TIME_PER_VAR)
                    yield timestamp
        print("Death Blossom activated")
        os.system('./exit.escript')  # Kills "python3", but not "python"

    # Run benchmark
    benchmark.run(print_generator(), start_server_locally)
Example #28
def main() :
	
	usr_opts = PlannerOptions( sys.argv[1:] )
	
	command = '%s --domain %s --problem %s --time %s'%( usr_opts.planner, usr_opts.domain, usr_opts.problem, usr_opts.max_time )	
	log_filename = [ os.path.basename(usr_opts.domain).replace('.pddl',''),
			 '_',  os.path.basename( usr_opts.problem).replace('.pddl',''),
			 '_', usr_opts.planner.split('/')[-2], '.log' ]

	print log_filename
	
	log = benchmark.Log( ''.join(log_filename) )
	rv, time = benchmark.run( command, usr_opts.max_time, usr_opts.max_memory, log )

	print >> sys.stdout, "Exit Code:", rv, "Time:", time	
Example #29
 def execute(self):
     if self.simple:
         cmd_string = './subopt_PR -d %s -i %s -I' % (self.domain,
                                                      self.problem)
     else:
         if self.bfs:
             cmd_string = './subopt_PR -d %s -i %s -B' % (self.domain,
                                                          self.problem)
         else:
             cmd_string = './subopt_PR -d %s -i %s' % (self.domain,
                                                       self.problem)
     self.log = benchmark.Log('%s.log' % self.noext_problem)
     self.signal, self.time = benchmark.run(cmd_string, self.max_time,
                                            self.max_mem, self.log)
     self.gather_data()
Example #30
def bench(start_env):
    def print_generator():
        BENCH_TIME_PER_VAR = 60 * 60 * 2.4  # In seconds
        MIN_CR = 1200  # Minimum number of communication rounds
        learning_rates = np.linspace(0.09, 0.02, 8)

        for lr in learning_rates:
            timestamp = fed_avg.print_assignment(
                dataset='MNIST-non-iid',
                variant='1',
                chosen_client_proportion=0.1,
                run_time=BENCH_TIME_PER_VAR,
                communication_rounds=MIN_CR,
                epochs=5,
                batch_size=20,
                learning_rate=lr,
                lr_decay=0,
            )
            time.sleep(BENCH_TIME_PER_VAR)
            yield timestamp
        print("Done!")

    # Run benchmark with learning_rates
    benchmark.run(print_generator(), start_env)
Example #32
def main():

    commands = [
        ('./siw.py ~/Sandboxes/Fast-Downward/benchmarks/blocks/domain.pddl %(instance)s plan.ipc',
         'siw-'),
    ]

    files = glob.glob(
        '/home/bowman/Sandboxes/Fast-Downward/benchmarks/blocks/probBLOCKS-*.pddl'
    )
    files.sort()
    os.system('rm -rf *.res')

    for command, prefix in commands:
        res_file = '%sresult.csv' % prefix
        if os.path.exists(res_file):
            os.system('rm -rf %s' % res_file)
        results = []
        for instance in files:
            output = prefix + os.path.split(instance)[-1].replace(
                'pddl', 'res')
            instance_command = command % locals()

            log = benchmark.Log(output)
            signal, time = benchmark.run(instance_command, 1800, 2048, log)
            print >> sys.stdout, "%s, %s, %s" % (instance, signal, time)
            expanded = None
            cost = None
            with open(output) as log_output:
                for line in log_output:
                    if 'expanded during' in line:
                        exp_tok = line.split(':')[-1]
                        expanded = exp_tok.strip()
                        continue
                    if 'Plan found with cost' in line:
                        cost_tok = line.split(':')[-1]
                        cost = cost_tok.strip()
                        continue
            results.append([
                os.path.split(instance)[-1].replace('.pddl',
                                                    '').replace('-', ' '),
                expanded, cost
            ])
        results.sort()
        with open(res_file, 'w') as output:
            for instance, exp, cost in results:
                print >> output, "%s,%s,%s" % (instance, exp, cost)
Example #33
def main():
    opt = nff_options.Program_Options(sys.argv[1:])
    opt.print_options()

    dom_name_clean = os.path.basename(opt.domain).replace('.pddl', '')
    inst_name_clean = os.path.basename(opt.instance).replace('.pddl', '')

    name = '%s_%s' % (dom_name_clean, inst_name_clean)

    log = benchmark.Log('%s.log' % name)
    exec_name = 'c3'

    cmd = './%s -d %s -i %s' % (exec_name, opt.domain, opt.instance)
    #if opt.use_h1 : cmd += ' -1'
    #if opt.use_hcl : cmd += ' -2'
    #if opt.pw_each_layer : cmd += ' -P'
    #if opt.do_bnb : cmd += ' -B'
    #if opt.constrain_h1 : cmd += ' -C'
    #if opt.joint_persistency : cmd += ' -J'
    #if opt.keep_based_ranking : cmd += ' -K'
    #if opt.reachable : cmd += ' -R'

    #cmd += ' -b %d'%opt.branch_opt

    signal, time = benchmark.run(cmd, opt.max_time, opt.max_memory, log)

    res_info = [dom_name_clean, inst_name_clean, str(signal), str(time)]

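    # execution.stats, if the planner produced it, holds key=value lines whose
    # values are appended to the result row.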
    if os.path.exists('execution.stats'):
        instream = open('execution.stats')
        for line in instream:
            line = line.strip()
            toks = line.split('=')
            res_info.append(toks[1])
        instream.close()

    outstream = open('%s.result' % name, 'w')

    print >> outstream, ",".join(res_info)

    outstream.close()
Example #34
 def execute(self):
     if LAMA.greedy:
         #cmd_string = '/home/mk/Planning/fastdownwardplanner/fast-downward.py  --alias seq-sat-lama-2011 %s %s '%( self.domain, self.problem)
         #cmd_string = '/home/mk/Planning/fastdownwardplanner/fast-downward.py  --alias lama-first %s %s '%( self.domain, self.problem)
         #cmd_string = '/home/mk/PycharmProjects/pic-to-plan-v2-git/pic_to_plan_v2/fd-0/fast-downward.py  --alias seq-sat-lama-2011 %s %s '%( self.domain, self.problem)
         cmd_string = os.path.join(
             '..', 'downward', 'fast-downward.py'
         ) + ' %s %s --evaluator "hff=ff()" --evaluator "hcea=cea()" --search "lazy_greedy([hff, hcea], preferred=[hff, hcea])"' % (
             self.domain, self.problem)
     else:
         #cmd_string = './plan %s %s %s'%( self.domain, self.problem, self.result)
         #cmd_string = '/home/mk/PycharmProjects/pic-to-plan-v2-git/pic_to_plan_v2/fd-0/fast-downward.py  --alias seq-sat-lama-2011 %s %s '%( self.domain, self.problem)  #Michael. I could put the normal lama here?
         #cmd_string = '/home/mk/PycharmProjects/pic-to-plan-v2-git/pic_to_plan_v2/fd-0/fast-downward.py %s %s --evaluator "hff=ff()" --evaluator "hcea=cea()" --search "lazy_greedy([hff, hcea], preferred=[hff, hcea])"'%( self.domain, self.problem)
         cmd_string = os.path.join(
             '..', 'downward', 'fast-downward.py'
         ) + ' %s %s --evaluator "hff=ff()" --evaluator "hcea=cea()" --search "lazy_greedy([hff, hcea], preferred=[hff, hcea])"' % (
             self.domain, self.problem)
     self.log = benchmark.Log(self.log_file)
     self.signal, self.time = benchmark.run(cmd_string, self.max_time,
                                            self.max_mem, self.log)
     self.gather_data()
Example #35

if __name__ == '__main__':
    sys.path.insert(1, '../../../utils/benchmarks/popart')
    import benchmark

    module = benchmark.Benchmark(
        graph_builder,
        add_args,
        iteration_report
    )

    opts = benchmark.parse_opts(module)

    opts.train = opts.mode == "train"

    # Log Benchmark Message
    print("Popart Multi-IPU {} Synthetic benchmark.\n"
          " Batch size {}.\n"
          " Batches per Step {}.\n"
          " Steps {}.\n"
          " {} IPUs."
          .format(
              {"infer": "Inference", "eval": "Evaluation", "train": "Training"}[opts.mode],
              opts.batch_size,
              opts.batches_per_step if not opts.report else "n/a",
              opts.steps if not opts.report else "n/a",
              opts.shards))
    np.random.seed(42)
    benchmark.run(module, opts)
	def execute( self ) :
		cmd_string = './ff -O -E -o %s -f %s'%(self.domain, self.problem)
		self.log = benchmark.Log( self.log_file )
		self.signal, self.time = benchmark.run( cmd_string, self.max_time, self.max_mem, self.log )
		self.gather_data()
Example #37
import adv_test
from adv import *
import mikoto
import benchmark


def module():
    return Mikoto


class Mikoto(mikoto.Mikoto):
    pass


if __name__ == '__main__':
    conf = {}
    conf['acl'] = """
        `s1, seq=5 and cancel or fsc
        `s2, seq=5 and cancel or fsc
        `s3, seq=5 and cancel or fsc
        """

    def foo():
        adv_test.test(module(), conf, verbose=0, mass=1)

    benchmark.run(foo)
Example #38
    parser.add_argument('-r',
                        '--run',
                        default="dpbench_run",
                        help="Run name for mlflow")
    parser.add_argument('-p',
                        '--path',
                        default="results/",
                        help="Path of the folder to save the results")

    args = parser.parse_args()
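    # Each list option may also be passed as a single space-separated string.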
    if isinstance(args.dataset[0], str) and len(args.dataset) == 1:
        args.dataset = args.dataset[0].split()

    if isinstance(args.epsilon[0], str) and len(args.epsilon) == 1:
        args.epsilon = args.epsilon[0].split()

    if isinstance(args.metric[0], str) and len(args.metric) == 1:
        args.metric = args.metric[0].split()

    return args


if __name__ == "__main__":

    args = _parse_args()

    benchmark.run(epsilons=args.epsilon,
                  run_name=args.run,
                  metric=args.metric,
                  dataset=args.dataset,
                  result_path=args.path)
Example #39
                " worker if one is specified[/red bold]")
            sys.exit(1)
        threading_configurations =\
                list(itertools.product(args.workers, args.threads_per_worker))

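    # Without an explicit prefix, derive one from the date, `git describe`, and
    # a random nonce, under either profiles/ or timings/.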
    if args.prefix is None:
        repo = git.Repo(os.path.abspath(os.path.join(SOURCE_DIR, '../../../')))
        commit_string = datetime.datetime.now().strftime('%Y-%m-%d') + "_"\
                + git_describe(repo) + "_"\
                + util.make_random_nonce()

        if args.profile:
            args.prefix = os.path.abspath(
                os.path.join(SOURCE_DIR, '../profiles', commit_string))
        else:
            args.prefix = os.path.abspath(
                os.path.join(SOURCE_DIR, '../timings', commit_string))
        rich.print("Placing results in [red bold]{}[/red bold]".format(
            os.path.relpath(args.prefix)))

    start_time = timer()
    benchmark.run(args.prefix, args.regions, args.taxa, args.iters, args.procs,
                  args.program, args.profile, threading_configurations,
                  flamegraph_cmd)
    end_time = timer()
    with open(os.path.join(args.prefix, "notes.md"), 'a') as notesfile:
        notesfile.write("- notes:\n")
        if args.notes:
            notesfile.write("  - {}\n".format(args.notes))
    rich.print("Benchmarks took {:.3f} seconds".format(end_time - start_time))
Example #40
        inputs,
        initializer,
        add_args,
        iteration_report
    )

    options = benchmark.parse_opts(module, False)

    if options.shards > 1:
        raise NotImplementedError(
            "--shards option has not been implemented with this example")

    # Log Benchmark Message
    print("Multi-layer LSTM with a dense final layer, {} Benchmark.\n"
          " Batch size {}.\n"
          " Batches per Step {}.\n"
          " Steps {}.\n"
          " Hidden size {}.\n"
          " Number of layers {}.\n"
          " Timesteps {}.\n"
          .format(
              "Training" if options.train else "Inference",
              options.batch_size,
              options.batches_per_step if not options.report else "n/a",
              options.steps if not options.report else "n/a",
              options.hidden_size,
              options.num_layers,
              options.timesteps))

    benchmark.run(module, options)
Example #41
def main():
    """Main program."""
    logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
                        level=logging.INFO)

    # benchmark if necessary
    if not os.path.isfile(BENCHMARKS_FILE):
        import benchmark
        paying, ports = nicehash_multialgo_info()
        benchmark.run(paying.keys())

    # load benchmarks
    benchmarks = json.load(open(BENCHMARKS_FILE))

    running_algorithm = None
    cpuminer_thread = None

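    # Main loop: poll zpool stats, refresh measured hash rates, and switch the
    # miner to the most profitable algorithm once the gain exceeds
    # PROFIT_SWITCH_THRESHOLD.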
    while True:
        try:
            paying, ports = nicehash_multialgo_info()
        except urllib.error.URLError as err:
            logging.warning('failed to retrieve ZPOOL stats: %s' % err.reason)
        except urllib.error.HTTPError as err:
            logging.warning('server error retrieving ZPOOL stats: %s %s' %
                            (err.code, err.reason))
        except socket.timeout:
            logging.warning('failed to retrieve ZPOOL stats: timed out')
        except (json.decoder.JSONDecodeError, KeyError):
            logging.warning('failed to parse ZPOOL stats')
        else:
            if cpuminer_thread is not None:
                # Update hash rate if enough accepted hashes have been seen
                if np.min(
                        cpuminer_thread.nof_hashes) > NOF_HASHES_BEFORE_UPDATE:
                    benchmarks[running_algorithm]['hash_rate'] = np.sum(
                        cpuminer_thread.hash_sum / cpuminer_thread.nof_hashes)
                    benchmarks[running_algorithm]['last_updated'] = time()
                    json.dump(benchmarks, open(BENCHMARKS_FILE, 'w'))
                    logging.info(
                        'UPDATED HASH RATE OF ' + running_algorithm + ' TO: ' +
                        str(benchmarks[running_algorithm]['hash_rate']))
                # Remove payrate if the algorithm is not working
                if (cpuminer_thread.fail_count > 5
                        and time() - cpuminer_thread.last_fail_time < 60):
                    payrates[running_algorithm] = 0
                    benchmarks[running_algorithm][
                        'last_fail_time'] = cpuminer_thread.last_fail_time
                    json.dump(benchmarks, open(BENCHMARKS_FILE, 'w'))
                    logging.error(
                        running_algorithm +
                        ' FAILS MORE THAN ALLOWED SO IGNORING IT FOR NOW!')

            # Compute payout and get best algorithm
            payrates = nicehash_mbtc_per_day(benchmarks, paying)
            best_algorithm = max(payrates.keys(),
                                 key=lambda algo: payrates[algo])

            # Switch algorithm if it's worth while
            if running_algorithm is None or running_algorithm != best_algorithm and \
                (payrates[running_algorithm] == 0 or payrates[best_algorithm]/payrates[running_algorithm] >= 1.0 + PROFIT_SWITCH_THRESHOLD):

                # kill previous miner
                if cpuminer_thread is not None:
                    cpuminer_thread.join()
                    logging.info('killed process running ' + running_algorithm)

                # start miner
                logging.info('starting mining using ' + best_algorithm +
                             ' using ' +
                             str(benchmarks[best_algorithm]['nof_threads']) +
                             ' threads')
                cpuminer_thread = MinerThread([
                    './cpuminer', '-u', WALLET + '.' + WORKER, '-p', 'c=BTC',
                    '-o', 'stratum+tcp://' + best_algorithm + '.' +
                    'mine.zpool.ca:' + str(ports[best_algorithm]), '-a',
                    best_algorithm, '-t',
                    str(benchmarks[best_algorithm]['nof_threads'])
                ], benchmarks[best_algorithm]['nof_threads'])
                cpuminer_thread.start()
                running_algorithm = best_algorithm

        def printHashRateAndPayRate():
            if running_algorithm is not None:
                if (np.sum(cpuminer_thread.nof_hashes) > 0):
                    hash_rate = np.sum(cpuminer_thread.hash_sum /
                                       cpuminer_thread.nof_hashes)
                    logging.info('Current average hashrate is %f H/s' %
                                 hash_rate)
                    current_payrate = compute_revenue(
                        paying[running_algorithm], hash_rate)
                    logging.info(
                        running_algorithm +
                        ' is currently expected to generate %f mBTC/day or %f mBTC/month'
                        % (current_payrate, current_payrate * 365 / 12))

        printHashRateAndPayRate()
        sleep(UPDATE_INTERVAL / 2)
        printHashRateAndPayRate()
        sleep(UPDATE_INTERVAL / 2)