import argparse
import pkgutil

import torch


def parse_args():
    parser = argparse.ArgumentParser(description='PyTorch DeepLIFT comparison')
    parser.add_argument(
        '--model',
        type=str,
        default='vbd_opposite',
        choices=[
            'vbd', 'vbd_opposite', 'vgd', 'p_b', 'p_g', 'ising_vbd',
            'ising_soft_vbd', 'vbd_window'
        ],
        help='model variant to use')
    parser.add_argument('--l1_reg_coef',
                        type=float,
                        default=0.1,
                        help='Only used in IsingBDNet')
    parser.add_argument('--l2_reg_coef',
                        type=float,
                        default=0.,
                        help='Only used in IsingBDNet')
    parser.add_argument('--window',
                        type=int,
                        default=2,
                        help='Perturbation size. Used in p_b or vbd_window')
    parser.add_argument('--from-digit',
                        type=int,
                        default=8,
                        help='digit to mask from')
    parser.add_argument('--to-digit',
                        type=int,
                        default=3,
                        help='digit to mask to')
    parser.add_argument('--verbose', type=int, default=1)
    parser.add_argument('--top_n',
                        type=int,
                        default=1,
                        help='-1 means the whole test set')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='disables CUDA training')
    parser.add_argument('--visualize', action='store_false', default=True)
    args, _ = parser.parse_known_args()
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    print 'cuda:', args.cuda

    # CS server settings
    if args.cuda and pkgutil.find_loader('gpu_lock') is not None:
        import gpu_lock
        board = gpu_lock.obtain_lock_id()
        torch.cuda.set_device(board)
        print 'start using gpu device: %d' % board

    return args
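
# Usage sketch: one way a caller might consume the parsed options; the field
# names (model, window, cuda) follow the parser defined above.
args = parse_args()
device = 'GPU' if args.cuda else 'CPU'
print 'running model %s (window=%d) on the %s' % (args.model, args.window, device)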

import sys
import time

import gpu_lock


def LockGPU(max_retries=10):
  """ Locks a free GPU board and returns its id. """
  board = -1
  for retry_count in range(max_retries):
    board = gpu_lock.obtain_lock_id()
    if board != -1:
      break
    time.sleep(1)
  if board == -1:
    print 'No GPU board available.'
    sys.exit(1)
  else:
    import theano.sandbox.cuda
    theano.sandbox.cuda.use('gpu' + str(board))
  return board
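
# Usage sketch (assumption: the same gpu_lock module also exposes free_lock()
# for releasing a board; if it does not, drop the release call).
board = LockGPU()
print 'Training on GPU board %d' % board
# ... run the Theano job ...
gpu_lock.free_lock(board)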

    # Compare the GPU softmax output against the reference result r.
    singleSoftmax(xGPU, tempRow)
    xGPU.copy_to_host()
    diff = xGPU.numpy_array - r
    print num.sum(num.abs(diff))
    #testMaskedSM()

    # Reshape a 5x1 column to 1x5, then write it into row 2 of a 5x5 zero
    # matrix with set_row_slice.
    col = cm.CUDAMatrix(reformat(num.random.rand(5, 1)))
    print col.shape
    col.copy_to_host()
    print col.numpy_array
    col.reshape((1, 5))
    print col.shape
    col.copy_to_host()
    print col.numpy_array
    garb = cm.CUDAMatrix(reformat(num.zeros((5, 5))))
    garb.set_row_slice(2, 3, col)
    garb.copy_to_host()
    print garb.numpy_array

if __name__ == "__main__":
    print "export LD_LIBRARY_PATH=/u/gdahl/cudaLearn/"
    print "export CUDAMATDIR=/u/gdahl/cudaLearn"
    
    devId = gpu_lock.obtain_lock_id()
    cm.cuda_set_device(devId)
    
    cm.cublas_init()
    cm.CUDAMatrix.init_random(1)
    main()
    cm.cublas_shutdown()
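
# A hedged variant of the device setup above (assumption: obtain_lock_id()
# returns -1 when every board is taken, as the LockGPU helper assumes): bail
# out instead of binding cudamat to an invalid device id.
import sys
import gpu_lock
import cudamat as cm

devId = gpu_lock.obtain_lock_id()
if devId == -1:
    print 'No GPU board available.'
    sys.exit(1)
cm.cuda_set_device(devId)
cm.cublas_init()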

# stolen from Ilya (/u/ilya/py/gpu.py)
# The lock is taken and THEANO_FLAGS is set *before* theano is imported,
# since Theano reads the device flag only at import time.
import gpu_lock
lock_id = gpu_lock.obtain_lock_id()
import os
os.environ['THEANO_FLAGS'] = 'mode=FAST_RUN,device=gpu%s,floatX=float32' % \
    lock_id
import theano
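
# A hedged variant (assumption: lock_id is -1 when no board is free, matching
# the checks above): fall back to the CPU instead of handing Theano "gpu-1".
import os
import gpu_lock

lock_id = gpu_lock.obtain_lock_id()
device = 'gpu%d' % lock_id if lock_id >= 0 else 'cpu'
os.environ['THEANO_FLAGS'] = 'mode=FAST_RUN,device=%s,floatX=float32' % device
import theano
print theano.config.device  # confirm which device Theano ended up using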