def __init__(self, devices, global_flags):
    """Entry point/initializer of a worker process.

    Re-establishes per-process global state (device list, flags) and binds
    this worker to one device out of `devices`.

    devices:      list of device ids to distribute workers over.
    global_flags: flags object whose values are copied into this process's
                  `globals.flags`.
    """
    global _workerid
    # Keyboard interrupts go up to the main process; workers ignore them.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    # On Windows we need to do this explicitly because the worker was
    # spawned, not forked, so module-level state was not inherited.
    globals.set_devices(devices)
    globals.flags.copy_from(global_flags)
    process = multiprocessing.current_process()
    if process.name == "MainProcess":
        _workerid = 0
    else:
        # Pool worker names look like "<type>-<id>", where <id> is a unique
        # 1-based process id that grows every time a new pool is created.
        # The process type is not needed, so discard it.
        _, process_id = process.name.split("-")
        # Unique 0-based worker index, always in range {0, ..., nprocess-1}.
        _workerid = (int(process_id) - 1) % len(devices)
    sm.set_backend_options(device=globals._devices[_workerid])
def __init__(self, devices, global_flags):
    """Initialize this worker process.

    Copies the device list and flags into this process's globals and selects
    the backend device this worker will use.
    """
    global _workerid
    # Ignore Ctrl-C here; keyboard interrupts are handled by the main process.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    # Windows workers are spawned rather than forked, so module-level state
    # must be re-established explicitly in each worker.
    globals.set_devices(devices)
    globals.flags.copy_from(global_flags)
    proc = multiprocessing.current_process()
    if proc.name != "MainProcess":
        # Pool workers are named "<type>-<id>"; <id> is a unique 1-based
        # process id that keeps growing as new pools are created. Fold it
        # into a 0-based worker index in {0, ..., len(devices)-1}.
        process_type, process_id = proc.name.split("-")
        _workerid = (int(process_id) - 1) % len(devices)
    else:
        _workerid = 0
    #logging.info("prediction %d on device %d" % (_workerid, globals._devices[_workerid]))
    sm.set_backend_options(device=globals._devices[_workerid])
# Command-line options: which device to run on and which optimizer to use.
parser.add_argument("-d", "--device", type=int, default=None,
                    help="The device to use, e.g. CUDA device.")
parser.add_argument("-m", "--method", type=str, default="L-BFGS-B",
                    help="The optimization algorithm to use. Valid values are COBYLA and L-BFGS-B.")
args = parser.parse_args()

# Select the compute backend before any smat arrays are created.
if args.device is not None:
    smat.set_backend_options(device=args.device)

print "Using device", smat.get_backend_info().device
print "Using method", args.method, "with float64"

# Load some sample bio data. Specifically this is a subset of the
# RNAcompete protein binding affinities from Ray et al., Nature, 2013.
# NOTE(review): assumes 'data/rnac/rnac_subset.npz' exists relative to the
# current working directory -- confirm before running.
y = numpy.load('data/rnac/rnac_subset.npz')['y']
n, m = y.shape

def objective_function(x, y, lib):
    # The test objective function below happens to be that corresponding to
    # "Variance Stabilization" (Huber et al., Bioinformatics, 2002).
    # The specific objective is not important.
    # The point is that the parameters can be sent to the GPU,
# http://tools.genes.toronto.edu/deepbind/ # import numpy import numpy.random import smat import smat.util import argparse import scipy.optimize parser = argparse.ArgumentParser(description="Train a 784-1000-1000-10 neural net on MNIST and print out the error rates.") parser.add_argument("-d","--device",type=int,default=None,help="The device to use, e.g. CUDA device.") parser.add_argument("-m","--method",type=str,default="L-BFGS-B",help="The optimization algorithm to use. Valid values are COBYLA and L-BFGS-B.") args = parser.parse_args() if args.device is not None: smat.set_backend_options(device=args.device) print "Using device",smat.get_backend_info().device print "Using method",args.method,"with float64" # Load some sample bio data. Specifically this is a subset of the # RNAcompete protein binding affinities from Ray et al., Nature, 2013. y = numpy.load('data/rnac/rnac_subset.npz')['y'] n,m = y.shape def objective_function(x,y,lib): # The test objective function below happens to be that corresponding to # "Variance Stabilization" (Huber et al., Bioinformatics, 2002). # The specific objective is not important. # The point is that the parameters can be sent to the GPU, # evaluated, pulled back, and STILL be much faster than CPU.