def gpu_job(i, op, o):
    """ Convert a cpu job to a gpu job

    inputs:
        i   - iterable of input variables
        op  - a cpu op
        o   - iterable of output variables

    outputs:
        gi  - iterable of input variables
        gop - a gpu op
        go  - iterable of output variables
    """
    # Rebuild the cpu computation as a fresh apply node and carry the output
    # names over onto the new outputs
    node = op.make_node(*i)
    for v, nv in zip(o, node.outputs):
        nv.name = v.name

    gii, goo = cpu_to_gpu_graph(node.inputs, node.outputs)
    if len(list_of_nodes(gii, goo)) != 1:
        raise ValueError("We have assumed that translations of single cpu-ops "
                         "would result in single gpu-ops. This computation "
                         "has invalidated that assumption")

    # Clone so the returned variables are detached from the translated graph,
    # then give each one the gpu version of its cpu name
    gop = goo[0].owner.op
    go = [x.clone() for x in goo]
    gi = [x.clone() for x in gii]
    for v, gv in zip(i + o, gi + go):
        gv.name = gpu_name(v.name)
    return gi, gop, go
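
# A usage sketch for gpu_job (illustrative only): the variables `x`, `y`, `z`
# and the elemwise add are hypothetical, and we assume a working
# theano.sandbox.cuda plus the cpu_to_gpu_graph/gpu_name helpers from this
# module. It also assumes the translation of a single elemwise yields a
# single gpu op, as gpu_job itself requires.
def _demo_gpu_job():
    import theano.tensor as T
    x = T.matrix('x')
    y = T.matrix('y')
    z = x + y
    z.name = 'z'
    gi, gop, go = gpu_job((x, y), z.owner.op, (z,))
    # gi/go mirror (x, y)/(z,) with gpu types and gpu_name-translated names
    return gi, gop, go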
def internal_gpu_theano_graph(dag):
    """ Convert the internal (non-communication) part of a dag to a gpu graph

    inputs:
        dag - a dicdag with send/recvs

    outputs:
        gins/gouts - a theano i/o graph with gpu variables and ops
        # sents - tuple of sent gpu variables
        # recvs - a tuple of recved gpu variables
    """
    # Strip off the send/recv communication jobs, then translate what remains
    non_comm, sent, recved = non_comm_dag(dag)
    inputs = inputs_of(non_comm)
    outputs = outputs_of(non_comm)
    ins, outs = dicdag.theano.dag_to_theano_graph(non_comm, inputs, outputs)
    gins, gouts = cpu_to_gpu_graph(ins, outs)
    return gins, gouts
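
# internal_gpu_theano_graph delegates the actual translation to
# cpu_to_gpu_graph. A minimal sketch of that step on a plain theano i/o graph
# (assumptions: CUDA backend available, the dicdag send/recv handling is
# skipped here, and `x`/`y` are hypothetical).
def _demo_cpu_to_gpu_graph():
    import theano.tensor as T
    x = T.matrix('x')
    y = T.dot(x, x)
    y.name = 'y'
    gins, gouts = cpu_to_gpu_graph([x], [y])
    # gins/gouts correspond to [x]/[y] but carry gpu variables and ops
    return gins, gouts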
def time_computation(inputs, outputs, numeric_inputs, niter):
    """ Average gpu runtime in seconds per iteration of a theano i/o graph

    Returns a large sentinel value if the graph can not run entirely on the
    gpu.
    """
    gpu_inputs, gpu_outputs = cpu_to_gpu_graph(inputs, outputs)

    # TODO: replace this with a can_run_on(job, machine) function
    #       use this function in tompkins
    if not all(isinstance(n.op, theano.sandbox.cuda.GpuOp)
               for n in theano.gof.graph.list_of_nodes(gpu_inputs,
                                                       gpu_outputs)):
        return 99999.9  # sentinel: this computation can not run on the gpu

    # Transfer the numeric inputs to the device before timing starts
    gpu_numeric_inputs = [togpu_data(x) for x in numeric_inputs]
    f = theano.function(gpu_inputs, gpu_outputs)

    debugprint("Computing")
    starttime = time.time()
    for n in xrange(niter):
        f(*gpu_numeric_inputs)  # feed the gpu copies, not the cpu originals
    endtime = time.time()
    return (endtime - starttime) / niter
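
# A usage sketch for time_computation (assumptions: a CUDA-capable machine;
# the dot-product graph, the 500x500 size, and niter=10 are illustrative
# only).
def _demo_time_computation():
    import numpy as np
    import theano.tensor as T
    x = T.matrix('x')
    y = T.dot(x, x)
    xval = np.random.rand(500, 500).astype('float32')
    # Average seconds per iteration; 99999.9 signals that the graph could not
    # be moved entirely onto the gpu
    return time_computation([x], [y], [xval], 10)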