def timing_triple_cloud():
    execfile('picloud_venture_credentials.py')
    exp_params = experiment.exp_param_defaults({})
    exp_params['intermediate_iter'] = 1
    exp_params['max_initial_run_time'] = 30
    exp_params['max_burn_time'] = 30
    exp_params['max_sample_time'] = 30
    exp_params['n_samples'] = 25
    print experiment.exp_params_to_str(exp_params)

    data = scipy.io.loadmat("../data/irm_synth/irm_synth_20.mat", squeeze_me=True)
    observed = list(zip(data['train_i'].flat, data['train_j'].flat, data['train_v'].flat))
    missing = list(zip(data['test_i'].flat, data['test_j'].flat, data['test_v'].flat))
    data = {'observations': observed, 'missing': missing}

    model = models.product_IRM
    model_params = {'D': 1, 'alpha': 1, 'symmetric': True}

    # Timing run
    print 'Timing'
    job_id = cloud.call(experiment.network_cv_timing_run, data, model, exp_params, model_params,
                        _max_runtime=5, _env=cloud_environment)
    time_per_mh_iter = cloud.result(job_id)['time_per_mh_iter']

    # Live run
    print 'Live'
    exp_params['intermediate_iter'] = max(1, int(round(0.9 * exp_params['max_sample_time'] /
                                                       (exp_params['n_samples'] * time_per_mh_iter))))
    job_id = cloud.call(experiment.network_cv_single_run, data, model, exp_params, model_params,
                        _max_runtime=5, _env=cloud_environment)
    cloud.join(job_id)
    print cloud.result(job_id)

def runcode(self, code):
    """Execute a code object.

    When an exception occurs, self.showtraceback() is called to
    display a traceback.  All exceptions are caught except
    SystemExit, which is reraised.

    A note about KeyboardInterrupt: this exception may occur
    elsewhere in this code, and may not always be caught.  The
    caller should be prepared to deal with it.
    """
    try:
        job = cloud.call(cloud_run, code, self.locals)
        cloud.join(job)
        result = cloud.result(job)
        self.locals.update(result)
        info = cloud.info(job, ['stderr', 'stdout'])[job]
        sys.stdout.write(info['stdout'])
        sys.stderr.write(info['stderr'])
    except SystemExit:
        raise
    except KeyboardInterrupt:
        raise OperationAborted('Interrupted')
    except cloud.CloudException, e:
        self.showcloudtraceback(e)

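# The cloud_run helper passed to cloud.call above is not part of this snippet.
# A minimal sketch of what it might look like (hypothetical implementation,
# not the original): execute the code object against the supplied namespace on
# the worker and return the picklable entries so runcode can merge them back
# into self.locals.
def cloud_run(code, namespace):
    # Run the compiled code object in the given namespace (Python 2 syntax,
    # matching the rest of these snippets).
    exec code in namespace
    # Keep only entries that can be pickled, so the result can be shipped
    # back from the PiCloud worker.
    import cPickle
    picklable = {}
    for name, value in namespace.items():
        try:
            cPickle.dumps(value)
            picklable[name] = value
        except Exception:
            pass
    return picklable
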
def outer_map(y):
    jids = cloud.map(inner_map, range(y))
    cloud.join(jids)
    results = cloud.result(jids)
    return list(results)

def timing_run_cloud():
    execfile('picloud_venture_credentials.py')
    exp_params = experiment.exp_param_defaults({})
    exp_params['intermediate_iter'] = 1
    exp_params['max_initial_run_time'] = 30
    print experiment.exp_params_to_str(exp_params)

    data = scipy.io.loadmat("../data/irm_synth/irm_synth_20.mat", squeeze_me=True)
    observed = list(zip(data['train_i'].flat, data['train_j'].flat, data['train_v'].flat))
    missing = list(zip(data['test_i'].flat, data['test_j'].flat, data['test_v'].flat))
    data = {'observations': observed, 'missing': missing}

    model = models.product_IRM
    model_params = {'D': 1, 'alpha': 1, 'symmetric': True}

    job_id = cloud.call(experiment.network_cv_timing_run, data, model, exp_params, model_params,
                        _max_runtime=5, _env=cloud_environment)
    cloud.join(job_id)
    print cloud.result(job_id)

def url_chunker(url, chunksize=1024):
    """Fetch a URL, split its contents into chunks of `chunksize` lines,
    word-count the chunks in parallel on PiCloud, and insert the reduced
    index into Mongo.

    *Params*
    # url - the URL to fetch
    # chunksize - how many lines to read at once?
    """
    user_agent = {'User-agent': 'Mozilla/5.0'}
    result = requests.get(url, headers=user_agent)
    try:
        doc = result.content
    except Exception:
        raise Exception("URL " + url + " not responding")

    # Split the document into chunks of at most `chunksize` lines.
    text_in = StringIO(doc)
    chunks = []
    stop = False
    while not stop:
        text = ""
        for x in range(chunksize):
            try:
                text += text_in.next()
            except StopIteration:
                stop = True
                break
        if text:
            chunks.append(text)

    # Word-count each chunk in parallel on PiCloud, then reduce and store.
    jobids = cloud.map(wordcount, [(url, c) for c in chunks])
    cloud.join(jobids, deadlock_check=False)
    results = cloud.result(jobids)
    index = reduce_results(results)
    mongo_insert(index)
    return "OK"

def run(self, **kwargs):
    """
    Runs the experiment. If the experiment is run on the cloud,
    blocks until all jobs complete.
    """
    if not self.configured:
        raise BaseException("Must configure experiment first")
    logger.debug('Running experiment')
    cloud_job_ids = []
    self.run_params = kwargs.copy()
    run_mode = kwargs.get('run_mode', 'local')
    self.run_mode = run_mode
    self.jobs = []
    for job in self.iter_jobs():
        result = job.run(**kwargs)
        if run_mode == 'cloud':
            cloud_job_ids.append(result)
        self.jobs.append(job)
    if run_mode == 'cloud':
        logger.info("Waiting for cloud jobs to finish")
        cloud.join(cloud_job_ids)
        logger.info("Cloud jobs finished")

def f():
    # Note: this function closes over run_mode, iters, self and the
    # storage / History helpers from its enclosing scope; it is not standalone.
    if run_mode == 'cloud':
        cloud.join([self.job_id])
        store = storage.CloudStore()
    else:
        store = storage.LocalStore()
    full_history = store[self.params]
    partial_history = History()
    if iters is None:
        partial_history.states = full_history.states
    else:
        if isinstance(iters, int):
            # iters interpreted as stride
            iter_set = range(0, len(full_history.states), iters)
        else:
            iter_set = iters
        partial_history.states = [state for state in full_history.states
                                  if state.iter in iter_set]
    partial_history.job = self
    partial_history.summary = full_history.summary
    return partial_history

def test_info_status():
    jid = cloud.call(lambda: 3 * 3)
    cloud.join(jid)
    result = cloud.info(jid, ['status'])
    obj = result[jid]
    assert obj['status'] == 'done'

def test_info_stderr():
    jid = cloud.call(lambda: 3 * 3)
    cloud.join(jid)
    result = cloud.info(jid, ['stderr'])
    obj = result[jid]
    assert obj['stderr'] is None

def test_info_cpu_percentage():
    jid = cloud.call(lambda: 3 * 3)
    cloud.join(jid)
    result = cloud.info(jid, ['cpu_percentage'])
    obj = result[jid]
    assert obj['cpu_percentage.system'] is None

def test_info_all():
    jid = cloud.call(lambda: 3 * 3)
    cloud.join(jid)
    result = cloud.info(jid, ['status', 'stdout', 'stderr', 'logging', 'pilog',
                              'exception', 'runtime', 'created', 'finished',
                              'env', 'vol', 'function', 'label', 'args',
                              'kwargs', 'func_body', 'attributes', 'profile',
                              'memory', 'cputime', 'cpu_percentage', 'ports'])
    obj = result[jid]
    assert obj['status'] == 'done'

def test_exception3():
    '''cloud.join called with one invalid argument should raise TypeError'''
    # Wrapped in pytest.raises so the expected error makes the test pass
    # (assumes pytest as the runner; the original snippet called cloud.join directly).
    with pytest.raises(TypeError):
        cloud.join("asdf")

def test_info_cputime():
    jid = cloud.call(lambda: 3 * 3)
    cloud.join(jid)
    result = cloud.info(jid, ['cputime.user'])
    obj = result[jid]
    assert obj['cputime.user'] > 0

'''
Created on Jan 26, 2012

@author: Mudassar
'''
import os
import json

import cloud
from etl.config.drivers.picloud import config


def sum():
    return 10 + 10


if __name__ == '__main__':
    cloud.setkey(config['keyid'], config['key'])
    print cloud.realtime.request('c1', 10)
    jobid = cloud.call(sum, _type="c1", _label="TEST")
    cloud.join(jobid)
    print cloud.result(jobid)

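# The script above reserves 10 realtime 'c1' cores but never releases them, and
# PiCloud keeps billing a realtime reservation until it is released.  A hedged
# follow-up sketch (the release call and the 'request_id' key are assumptions
# about the realtime API, not shown in the original script):
req = cloud.realtime.request('c1', 10)
try:
    jobid = cloud.call(sum, _type="c1", _label="TEST")
    cloud.join(jobid)
    print cloud.result(jobid)
finally:
    cloud.realtime.release(req['request_id'])  # assumed signature / key name
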
def test_info_finished():
    jid = cloud.call(lambda: 3 * 3)
    cloud.join(jid)
    result = cloud.info(jid, ['finished'])
    obj = result[jid]
    assert obj['finished'] is not None

from pyevolve_rastrigin import *

import cloud

# Please change these
cloud.setkey('4027', 'xxxx')

# List of random seeds and run ids, assuming 10 runs
seed_list = [100 * (i + 1) for i in range(10)]
runid_list = [i + 1 for i in range(10)]

jids = cloud.map(run_ga, seed_list, runid_list)
cloud.join(jids)

for i in range(10):
    cloud.files.get('stats_' + str(i + 1) + '.csv', 'stats_' + str(i + 1) + '.csv')

import cloud

jid = cloud.call(lambda: 3 * 3)
cloud.join(jid)
answer = cloud.result(jid)
print answer

import cloud
import os
import time
from collections import Counter

# don't forget to set your own API key and secret in cloud_config.py
from cloud_config import *
cloud.setkey(key, secret)


def cloud_status(jids):
    s = Counter(cloud.status(jids))
    return s


def inner_map(x):
    return x * x


def outer_map(y):
    jids = cloud.map(inner_map, range(y))
    cloud.join(jids)
    results = cloud.result(jids)
    return list(results)


jids = cloud.map(outer_map, range(5))
cloud.join(jids)
results = cloud.result(jids)

def sendtocloud(*args, **kwargs):
    # Note: `func` comes from the enclosing scope; this looks like the inner
    # wrapper of a decorator that ships a function call to PiCloud.
    import cloud
    jid = cloud.call(func, *args, **kwargs)
    cloud.join(jid)
    print 'Result:: ', cloud.result(jid)

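# Since sendtocloud references `func` from an enclosing scope, it is presumably
# defined inside a decorator.  A minimal sketch of such a decorator (the name
# run_on_cloud is hypothetical; only the wrapper body comes from the original
# snippet):
def run_on_cloud(func):
    def sendtocloud(*args, **kwargs):
        import cloud
        jid = cloud.call(func, *args, **kwargs)
        cloud.join(jid)
        print 'Result:: ', cloud.result(jid)
    return sendtocloud

# Example usage:
# @run_on_cloud
# def add(x, y):
#     return x + y
# add(1, 2)  # executes on PiCloud and prints the result
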
def test_multiply():
    jid = cloud.call(lambda: 3 * 3)
    cloud.join(jid)
    answer = cloud.result(jid)
    assert answer == 9

def test_info_exception():
    jid = cloud.call(lambda: 3 * 3)
    cloud.join(jid)
    result = cloud.info(jid, ['exception'])
    obj = result[jid]
    assert obj['exception'] is None

def test_info_args():
    jid = cloud.call(lambda: 3 * 3)
    cloud.join(jid)
    result = cloud.info(jid, ['args'])
    obj = result[jid]
    assert obj['args'] is not None

def test_info_runtime():
    jid = cloud.call(lambda: 3 * 3)
    cloud.join(jid)
    result = cloud.info(jid, ['runtime'])
    obj = result[jid]
    assert obj['runtime'] > 0

def test_info_memory():
    jid = cloud.call(lambda: 3 * 3)
    cloud.join(jid)
    result = cloud.info(jid, ['memory.max_usage'])
    obj = result[jid]
    assert obj['memory.max_usage'] > 0

def test_info_env():
    jid = cloud.call(lambda: 3 * 3)
    cloud.join(jid)
    result = cloud.info(jid, ['env'])
    obj = result[jid]
    assert obj['env'] == ''

def test_exception2():
    '''cloud.join called without arguments should raise TypeError'''
    # Wrapped in pytest.raises so the expected error makes the test pass
    # (assumes pytest as the runner; the original snippet called cloud.join directly).
    with pytest.raises(TypeError):
        cloud.join()

def thumbnail(key_name):
    """Output is a new object in bucket with 'thumb_' prepended."""
    thumbnail_filename = 'thumb_' + key_name

    # download object to filesystem
    cloud.bucket.get(key_name)

    img = Image.open(key_name)
    img.thumbnail((100, 100), Image.ANTIALIAS)

    # save the image to the filesystem
    img.save(thumbnail_filename, 'JPEG')

    # store the image file in your bucket
    cloud.bucket.put(thumbnail_filename)


if __name__ == '__main__':
    # put face.jpg into your bucket
    cloud.bucket.put('face.jpg')

    # run thumbnail() on the cloud
    jid = cloud.call(thumbnail, 'face.jpg')

    # wait for job to finish
    cloud.join(jid)

    # download image
    cloud.bucket.get('thumb_face.jpg')

def test_info_function():
    jid = cloud.call(lambda: 3 * 3)
    cloud.join(jid)
    result = cloud.info(jid, ['function'])
    obj = result[jid]
    assert obj['function'] is not None

def step_fitness(self):
    """
    Run fitness tests for the current generation, and evolve the next generation.
    """
    # Firstly, render code for all the genomes in the current population.  Each genome owns its own
    # simulation object, because we want to interleave the simulations, running D_ROUNDS of simulation
    # rounds for all genomes, and killing off the weakest until BROOD_SIZE genomes remain.
    if self.next_population:
        self.population = copy.deepcopy(self.next_population)
        self.next_population = None
    for genome in self.population:
        code = genome.render(debug=self.DEBUG)
        genome.code_hash = md5.md5(code).hexdigest()
        genome.agent_name = 'agent_' + genome.code_hash
        genome.agent_path = 'agents/rendered/' + genome.agent_name + '.py'
        f = open(genome.agent_path, 'w')
        f.write(code)
        f.close()
        genome.agent_module = __import__('agents.rendered.' + genome.agent_name, fromlist=['*'])
        genome.simulation = simulate.Simulate(**self.sim_parameters)
        genome.simulation.agent_move = genome.agent_module.move
        genome.simulation.agent_observe_who = genome.agent_module.observe_who

    jobs = {}

    def job_callback(job):
        jobs[job].simulation = cloud.result(job)
        logger.debug('Job %d completed with fitness %.2f.' %
                     (job, 1.0 * jobs[job].simulation.total_payoff / jobs[job].simulation.round))

    def job_error(job):
        logger.debug('Job %d terminated with an error.' % job)

    while len(self.population) > self.BROOD_SIZE:
        if self.single_thread:
            # Run the simulations locally, one genome at a time.
            for genome in self.population:
                try:
                    genome.simulation.run(N_rounds=self.D_ROUNDS, return_self=True)
                except:
                    e = sys.exc_info()
                    logger.debug('----------------------------------------------------------------------')
                    logger.debug(traceback.format_exc())
                    logger.debug("State graph:")
                    logger.debug(pprint.pformat(genome.state))
        else:
            # Farm the simulations out to PiCloud and wait for them all to finish,
            # dropping any job that terminates with an exception.
            for genome in self.population:
                jobs[cloud.call(genome.simulation.run, N_rounds=self.D_ROUNDS, return_self=True,
                                _callback=[job_callback], _callback_on_error=[job_error],
                                _fast_serialization=0, _type='c1')] = genome
            done = False
            while not done:
                done = True
                try:
                    cloud.join(jobs.keys())
                except cloud.CloudException:
                    done = False
                    e = sys.exc_info()
                    logger.debug("More information on Job %d's unexpected termination:" % e[1].jid)
                    logger.debug("State graph:")
                    logger.debug(pprint.pformat(jobs[e[1].jid].state))
                    jobs.pop(e[1].jid)

        # Rank by total payoff, drop genomes below the performance threshold,
        # then decimate down to at most the surviving fraction (but never below BROOD_SIZE).
        self.population.sort(reverse=True, key=lambda genome: 1.0 * genome.simulation.total_payoff)
        self.population = [genome for genome in self.population
                           if genome.simulation.total_payoff >= self.PERFORMANCE_THRESHOLD]
        logger.debug([1.0 * genome.simulation.total_payoff / genome.simulation.round
                      for genome in self.population])
        new_N = int(round(len(self.population) * (1. - self.DECIMATION_PERCENT)))
        if new_N < self.BROOD_SIZE:
            new_N = self.BROOD_SIZE  # Let the fittest survive
        self.population = self.population[0:new_N]

import cloud


def add(x, y):
    return x + y


jid = cloud.call(add, 1, 2)
cloud.join(jid, timeout=20, ignore_errors=True, deadlock_check=False)
answer = cloud.result(jid)
print answer

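# Because ignore_errors=True lets cloud.join return even when the job failed,
# cloud.result would then typically re-raise the job's exception.  A minimal
# sketch of checking the job first, reusing cloud.status and cloud.info as in
# the snippets above (the 'done' status string follows the test_info_status
# snippet; treating anything else as a failure is an assumption):
jid = cloud.call(add, 1, 2)
cloud.join(jid, timeout=20, ignore_errors=True, deadlock_check=False)
if cloud.status(jid) == 'done':
    print cloud.result(jid)
else:
    # inspect the failure instead of letting cloud.result raise
    print cloud.info(jid, ['exception'])[jid]['exception']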