def tag(self, tag, urls, token=None, **kwargs):
    """Append the list of ``urls`` to the ``tag``.

    Recognized keyword flags (both default to ``False``) are ``delayed``
    and ``update``; truthy flags are encoded as ``key=1`` in the query
    string, falsy ones as ``key=``.
    """
    options = {'delayed': False, 'update': False}
    options.update(kwargs)
    query = '&'.join('%s=%s' % (key, '1' if value else '')
                     for key, value in options.items())
    return self._download('%s?%s' % (canonizetag(tag), query),
                          json.dumps(urls), token=token)
def tag(self, tag, urls, token=None, **kwargs):
    """Append the list of ``urls`` to the ``tag``.

    Keyword flags ``delayed`` and ``update`` (default ``False``) are
    serialized into the query string as ``key=1`` / ``key=``.
    """
    # dict(defaults, **kwargs) merges the caller's flags over the defaults.
    flags = dict({'delayed': False, 'update': False}, **kwargs)
    pairs = ['%s=%s' % (name, '1' if on else '') for name, on in flags.items()]
    url = '%s?%s' % (canonizetag(tag), '&'.join(pairs))
    return self._download(url, json.dumps(urls), token=token)
def add_node():
    """Append a dummy node to the cluster config table.

    Returns the configuration as it was before the change so the caller
    can restore it afterwards.  Raises ``Exception`` if the master does
    not acknowledge the save.
    """
    saved = json.loads(disco.request("/disco/ctrl/load_config_table"))
    updated = list(saved)
    updated.append(["missingnode", "2"])
    reply = disco.request("/disco/ctrl/save_config_table", json.dumps(updated))
    # The master replies with a JSON-quoted string on success.
    if reply != "\"table saved!\"":
        raise Exception("Couldn't add a dummy node: %s" % reply)
    return saved
def put(self, tag, urls):
    """Put the list of ``urls`` to the tag ``tag``.

    .. warning::
        Generally speaking, concurrent applications should use
        :meth:`DDFS.tag` instead.
    """
    target = '%s/ddfs/tag/%s' % (self.master, tagname(tag))
    return self._upload(target, StringIO(json.dumps(urls)))
def put(self, tag, urls, token=None):
    """Put the list of ``urls`` to the tag ``tag``.

    .. warning::
        Generally speaking, concurrent applications should use
        :meth:`DDFS.tag` instead.
    """
    payload = StringIO(json.dumps(urls))
    return self._upload(canonizetag(tag), payload, token=token)
def put(self, tag, urls):
    """Put the list of ``urls`` to the tag ``tag``.

    .. warning::
        Generally speaking, concurrent applications should use
        :meth:`DDFS.tag` instead.
    """
    from comm_httplib import download
    url = '%s/ddfs/tag/%s' % (self.master, tagname(tag))
    status, body = download(url, data=json.dumps(urls), method='PUT')
    return json.loads(body)
def results(self, jobspec, timeout=2000):
    """Return results for a single job or for many concurrently running jobs.

    If *jobspec* is a string (job name) or this is called through the job
    object (``job.results()``), return the job's result list if results
    become available within *timeout* milliseconds, else an empty list.
    (*Added in version 0.2.1*)

    If *jobspec* is a list of jobs (names, job objects, or result entries
    previously returned by this function), wait at most *timeout*
    milliseconds for at least one job to finish and return two lists —
    finished jobs and still-active jobs — whose elements look like::

        ["job name", ["status", [results]]]

    where status is one of ``unknown_job``, ``dead``, ``active`` or
    ``ready``.  The active list can be fed straight back into this
    function, which makes it an efficient way to wait for several jobs::

        while jobs:
            ready, jobs = disco.results(jobs)
            for name, results in ready:
                for k, v in result_iterator(results[1]):
                    print k, v
                disco.purge(name)
    """
    names = list(JobSpecifier(jobspec).jobnames)
    payload = json.dumps([timeout, names])
    statuses = json.loads(self.request('/disco/ctrl/get_results', payload))
    if isinstance(jobspec, basestring):
        # Single-job mode: unwrap the lone ["status", [results]] payload.
        return statuses[0][1]
    active = [entry for entry in statuses if entry[1][0] == 'active']
    finished = [entry for entry in statuses if entry[1][0] != 'active']
    return finished, active
def results(self, jobspec, timeout=2000):
    """Return results for *jobspec*.

    If *jobspec* is a single job name (a string), return that job's
    ``["status", [results]]`` payload.  Otherwise *jobspec* is treated as
    a collection of jobs and, after waiting at most *timeout*
    milliseconds, a pair ``(finished, active)`` of status entries is
    returned, each entry of the form ``["job name", ["status", [results]]]``.
    """
    jobspecifier = JobSpecifier(jobspec)
    data = json.dumps([timeout, list(jobspecifier.jobnames)])
    results = json.loads(self.request("/disco/ctrl/get_results", data))
    # isinstance instead of type(...) == str: also accepts str subclasses
    # and is the idiomatic type check.
    if isinstance(jobspec, str):
        return results[0][1]
    others, active = [], []
    for result in results:
        if result[1][0] == "active":
            active.append(result)
        else:
            others.append(result)
    return others, active
def setattr(self, tag, attr, val, token=None):
    """Set the value of the attribute ``attr`` of the tag ``tag``."""
    encoded = StringIO(json.dumps(val))
    return self._upload(self._tagattr(tag, attr), encoded, token=token)
def write(self, status=None, timestamp=None, host=None, message=None): if timestamp: print json.dumps([timestamp, host, message])
def tag(self, tag, urls, delayed=False):
    """Append the list of ``urls`` to the ``tag``.

    When *delayed* is true the request carries ``delayed=1`` in its
    query string; otherwise the query string is empty.
    """
    query = "delayed=1" if delayed else ""
    return self._download('/ddfs/tag/%s?%s' % (tagname(tag), query),
                          json.dumps(urls))
def set_config(self, config):
    """Save *config* as the cluster configuration table.

    Raises ``DiscoError`` with the master's reply if the save is not
    acknowledged with ``'table saved!'``.
    """
    raw = self.request('/disco/ctrl/save_config_table', json.dumps(config))
    reply = json.loads(raw)
    if reply != 'table saved!':
        raise DiscoError(reply)
def test(): num = sum(x['max_workers'] for x in disco.nodeinfo()['available']) inputs = range(num * 2) job = disco.new_job( name = "test_missingnode", map = fun_map, input = tserver.makeurl(inputs)) results = job.wait() s = sum(int(k) for k, v in result_iterator(results)) correct = sum(range(num * 2)) if s != correct: raise Exception("Invalid result. Got %d, expected %d" %\ (s, correct)) job.purge() disco = Disco(sys.argv[1]) tserver.run_server(data_gen) orig_config = add_node() try: test() except: raise finally: disco.request("/disco/ctrl/save_config_table", json.dumps(orig_config)) print "ok"
def set_config(self, config):
    """Persist *config* to the master's configuration table.

    Raises ``DiscoError`` carrying the master's reply unless the master
    answers ``'table saved!'``.
    """
    body = json.dumps(config)
    response = json.loads(self.request('/disco/ctrl/save_config_table', body))
    if response == 'table saved!':
        return
    raise DiscoError(response)
def tag(self, tag, urls):
    """Append the list of ``urls`` to the ``tag``."""
    path = '/ddfs/tag/%s' % tagname(tag)
    return self._request(path, json.dumps(urls))