Example #1
from torch.cuda import current_device, device_count, get_device_name

def check_devices():
    for i in range(device_count()):
        print("Found device {}:".format(i), get_device_name(i))
    if device_count() == 0:
        print("No GPU device found")
    else:
        print("Current cuda device is", get_device_name(current_device()))
Example #2
from torch import cuda

def get_info():
    return {
        "has_cuda": cuda.is_available(),
        "devices": [cuda.get_device_name(i) for i in range(cuda.device_count())]
        if cuda.is_available() else [],
    }
Example #3
    # Assumes: import os, pwd, platform, psutil; from datetime import datetime; from torch import cuda
    def system_info(self):
        uname = platform.uname()
        gpus = [cuda.get_device_name(i) for i in range(cuda.device_count())]

        self.update({
            'python': platform.python_version(),
            'machine': uname.machine,
            'processor': uname.processor,
            'os': os.name,
            'os_name': platform.system(),
            'os_ver': platform.release(),
            'memory': str(psutil.virtual_memory().total // 2**30) + ' GB',
            'storage': str(psutil.disk_usage('/').total // 2**30) + ' GB',
            'user': pwd.getpwuid(os.getuid())[0],
            'gpus': gpus,
            'timestamp': datetime.now().strftime('%f-%S-%M-%H-%d-%m-%Y')
        })
Example #4
import sys

import torch
import torchvision
from torch import cuda

def system_info():
    print(sys.version, "\n")
    print("PyTorch {}".format(torch.__version__), "\n")
    print("Torch-vision {}".format(torchvision.__version__), "\n")
    print("Available devices:")
    if cuda.is_available():
        for i in range(cuda.device_count()):
            print("{}: {}".format(i, cuda.get_device_name(i)))
    else:
        print("CPU only")
Example #5
from platform import processor
from typing import Tuple

from torch import cuda

def get_device(force_cpu: bool) -> Tuple[str, str]:
    """Gets the available device.

    :param force_cpu: Force CPU usage?
    :type force_cpu: bool
    :return: Device and device name.
    :rtype: str, str
    """
    return ('cuda', cuda.get_device_name(cuda.current_device())) \
        if cuda.is_available() and not force_cpu else \
        ('cpu', processor())
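
For reference, a minimal usage sketch of get_device (hypothetical call site; assumes the imports above):

device, device_name = get_device(force_cpu=False)
print(f'Selected {device} ({device_name})')  # e.g. "Selected cuda (NVIDIA ...)"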
Example #6
def device_info(the_device: str) -> None:
    """Prints an informative message about the device that we are using.

    :param the_device: The device.
    :type the_device: str
    """
    from torch.cuda import get_device_name, current_device
    from platform import processor
    actual_device = get_device_name(current_device()) \
        if the_device.startswith('cuda') else processor()
    cmd_msg(f'Using device: `{actual_device}`.')
Example #7
# Assumes: import os; from time import time; from torch import cuda, save;
# from pytorch_lightning import Trainer; from pytorch_lightning.loggers import TensorBoardLogger;
# `args` is the parsed CLI namespace from the surrounding script.
def run(txtvar, st_aug, data_dir, logdir, n_gpus):
    msg1 = 'Training {} model on {} variable'.format(args.bert_model, txtvar)
    test_outpath = args.bert_model + txtvar
    if st_aug:
        msg1 += ' with semantic types'
        test_outpath += '_st'
    test_outpath += '.txt'
    print('=========')
    print(msg1)
    s = time()
    model = MIMICBERTReadmissionPredictor(
        n_train_fp=os.path.join(data_dir, 'notes_train_seeded.csv'),
        r_train_fp=os.path.join(data_dir, 'readmission_train_seeded.csv'),
        n_test_fp=os.path.join(data_dir, 'notes_test_seeded.csv'),
        r_test_fp=os.path.join(data_dir, 'readmission_test_seeded.csv'),
        epochs=4,
        val_frac=0.2,
        batch_size=args.batch,
        lr=0.01,
        momentum=0.5,
        bert_model=args.bert_model,
        txtvar=txtvar,
        seqlen=512,
        st_aug=st_aug,
        db=args.debug,
        write_test_results_to=os.path.join(logdir, test_outpath),
        verbose=args.verbose)
    trainer = Trainer(
        default_root_dir=logdir,
        gpus=(n_gpus if cuda.is_available() else 0),
        max_epochs=1 if args.debug else args.epochs,
        logger=(TensorBoardLogger(logdir, name='tb') if args.log else None),
        fast_dev_run=args.debug,
        distributed_backend='ddp',
        accumulate_grad_batches=args.grad_accum)
    print('GPUs used:')
    if cuda.is_available() and n_gpus > 0:
        for i in range(n_gpus):
            print(cuda.get_device_name(i))
    else:
        print('None')
    trainer.fit(model)

    print('Pickling model...')
    out = 'mimic_' + args.bert_model
    if st_aug: out += '_st'
    save(model.state_dict(), os.path.join(data_dir, out + '.pt'))

    trainer.test(model)

    e = time()
    print('Training, pickling & testing time: {:.4f}'.format(e - s))
    print('=========\n\n')
Example #8
import torch

def get_device(verbose=True):
    '''
    Gets the currently available PyTorch device.
    '''
    print("CUDA device available? ", end='')
    if torch.cuda.is_available():
        device = torch.device("cuda:0")
        print("yes: " + str(device))
        print("GPU: " + str(torch.cuda.get_device_name(0)))
    else:
        device = torch.device("cpu")
        print("no. Using CPU")
    return device
Example #9
    # Assumes: from typing import Any, Dict; from torch import cuda
    def get_devices(self, format: bool = False) -> Dict[int, Dict[str, Any]]:
        devs = {}
        for i in range(self.num_devices):
            memory = dict(
                reserved=cuda.memory_reserved(i),
                allocated=cuda.memory_allocated(i),
                total=cuda.get_device_properties(i).total_memory,
            )
            if format:
                for k, v in memory.items():
                    memory[k] = f'{v / 1e9:.2f} GB'
            devs[i] = dict(name=cuda.get_device_name(i), memory=memory)
        return devs
Example #10
def __print_init_message():
    from magnet.utils.misc import in_notebook

    if not in_notebook():
        print('MagNet Inside')
        return

    from torch.cuda import get_device_name
    # `device` is assumed to be a module-level torch.device defined elsewhere in magnet.
    if device.type == 'cpu':
        print("Running your code on a boring CPU.")
    else:
        print(f'Accelerating your code on a shiny new {get_device_name(0)}.')
Example #11
import os

from torch import cuda

def set_cuda_devices(cpu_only, gpu_ids=None, logger=None):
    if gpu_ids is None:
        gpu_ids = list(range(cuda.device_count()))
    use_cuda = cuda.is_available() and not cpu_only and gpu_ids
    if use_cuda:
        # CUDA_VISIBLE_DEVICES expects a comma-separated list of device IDs.
        os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(map(str, gpu_ids))
        if logger:
            for gpu_id in gpu_ids:
                device_name = cuda.get_device_name(gpu_id)
                logger.info("CUDA %s : %s", gpu_id, device_name)
    else:
        gpu_ids = []
        if logger:
            logger.info("CUDA is disabled, CPU only")
    return use_cuda, gpu_ids
Example #12
from torch import cuda

def config_cuda(use_cuda):
    # Note: a bare torch.device(...) call only constructs an object and discards it,
    # so the original standalone torch.device('cpu')/torch.device('cuda') calls were
    # no-ops and have been dropped.
    if not use_cuda:
        print('Using cpu')
        return 'cpu'
    elif not cuda.is_available():
        print('Cuda not found, using cpu')
        return 'cpu'
    print('Configuring cuda...')
    cuda.set_device(0)
    current_dev = cuda.current_device()
    current_dev_name = cuda.get_device_name(current_dev)
    current_dev_specs = cuda.get_device_properties(current_dev)

    print(f'Current Device: {current_dev}')
    print(f'Current Device Name: {current_dev_name}')
    print(f'Current Device Specs: {current_dev_specs}')
    print()

    return 'cuda'
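
A brief usage sketch (hypothetical call site): the returned string can be handed to torch.device.

import torch

device = torch.device(config_cuda(use_cuda=True))
x = torch.zeros(8, device=device)  # tensors now target the configured device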
Example #13
from torch import cuda
from torchvision import transforms

# Whether to train on a gpu
train_on_gpu = cuda.is_available()

print(f'Train on gpu: {train_on_gpu}')

# Number of gpus
if train_on_gpu:
    gpu_count = cuda.device_count()
    print(f'{gpu_count} gpus detected.')
    multi_gpu = gpu_count > 1
    # Only query the device name when a GPU is actually present.
    print(cuda.get_device_name())
else:
    multi_gpu = False

# DATA_ROOT = '/data1/wenjie/github/pytorch_challenge/data/'
# CALTECH101_ORIGINAL = os.path.join(DATA_ROOT, '101_ObjectCategories')
# CALTECH101_ROOT = os.path.join(DATA_ROOT, 'caltech101_fisheye_Augmentation')
# split data into train, val and test set
# from utils import split_dataset_folder
# split_dataset_folder(CALTECH101_ORIGINAL, CALTECH101_ROOT)

# Image transformations
image_transforms = {
    # Train uses data augmentation
    'train': transforms.Compose([
        transforms.RandomResizedCrop(size=256, scale=(0.8, 1.0)),
Example #14
File: cuda.py  Project: debadeepta/pcprep
import tensorflow as tf

# TF1-style session check (tf.Session was removed in TensorFlow 2.x)
hello = tf.constant("hello TensorFlow!")
sess = tf.Session()
sess.run(hello)

# find PyTorch packages
import pkg_resources
l = [d for d in pkg_resources.working_set if 'pytorch' in str(d)]
print(l)

# confirm PyTorch sees the GPU
from torch import cuda
import torch
print('PyTorch version', torch.__version__)
print('PyTorch cuda available', cuda.is_available())
print('PyTorch device count', cuda.device_count())
print('PyTorch device', cuda.get_device_name(cuda.current_device()))

# confirm Keras sees the GPU
from keras import backend
print('keras GPUs:', backend.tensorflow_backend._get_available_gpus())

import os
os.system('nvidia-smi')
os.system('nvcc --version')

import ray
ray.init(num_gpus=1)
print('ray GPU IDs', ray.get_gpu_ids())
Example #15
from torch import cuda

def get_memory_use():
    # Note: memory_cached/max_memory_cached were renamed to
    # memory_reserved/max_memory_reserved in newer PyTorch releases.
    device = cuda.current_device()
    message = cuda.get_device_name(device) + ':\n'
    message += f'allocated: {cuda.memory_allocated(device)}/{cuda.max_memory_allocated(device)}\n'
    message += f'cached: {cuda.memory_cached(device)}/{cuda.max_memory_cached(device)}\n'
    return message
Example #16
import tensorflow as tf

#%% Check that gpu is available

from tensorflow.python.client import device_lib
assert 'GPU' in str(device_lib.list_local_devices())

# confirm Keras sees the GPU
from keras import backend
assert len(backend.tensorflow_backend._get_available_gpus()) > 0

# confirm PyTorch sees the GPU
from torch import cuda
assert cuda.is_available()
assert cuda.device_count() > 0
print(cuda.get_device_name(cuda.current_device()))


#%%

# Assumes: from keras.callbacks import ModelCheckpoint; from keras.models import Sequential;
# from keras.layers import CuDNNGRU, Dense; `n_features` is defined earlier in the notebook.
cb = [ModelCheckpoint("model.hdf5", save_best_only=True, period=3)]

model = Sequential()
model.add(CuDNNGRU(48, input_shape=(None, n_features)))
model.add(Dense(10, activation='relu'))
model.add(Dense(1))

model.summary()
#%%

# Compile and fit model
Example #17
def main():
    args = parse_args()
    identification = 'siamese_w6_{}_epochs_{}_dims_{}_{}'.format(
        os.path.basename(args.dataset_dir), args.epochs, args.dims,
        datetime.datetime.now())
    writer = SummaryWriter(log_dir='../runs/{}'.format(identification))

    if cuda.is_available():
        print('Device: {}'.format(cuda.get_device_name(0)))

    train_transform, test_transform = get_transforms(args.input_size)

    train_set = Dataset(args.dataset_dir,
                        train_transform,
                        min_images=args.min_images)
    train_batch_sampler = BalancedBatchSampler(train_set.targets,
                                               n_classes=10,
                                               n_samples=10)
    train_loader = DataLoader(train_set,
                              batch_sampler=train_batch_sampler,
                              num_workers=4)
    print(train_set)

    test_loader = None
    if args.validation_dir is not None:
        test_set = Dataset(args.validation_dir,
                           transform=test_transform,
                           min_images=args.min_images)
        test_loader = DataLoader(test_set,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 num_workers=4)
        print(test_set)

    model = EmbeddingNet(args.dims)
    # The original tested `if cuda:`, but the cuda module is always truthy.
    use_cuda = cuda.is_available()
    if use_cuda:
        model = model.cuda()
    print(model)

    criterion = OnlineTripletLoss(margin=1.0)
    optimizer = Adam(model.parameters(), lr=1e-4)
    scheduler = StepLR(optimizer, 8, gamma=0.1, last_epoch=-1)

    fit(train_loader,
        test_loader,
        model,
        criterion,
        optimizer,
        scheduler,
        args.epochs,
        use_cuda,
        writer=writer)

    train_embeddings, train_targets = extract_embeddings(
        train_loader, model, use_cuda)
    writer.add_embedding(train_embeddings,
                         metadata=train_targets,
                         tag='Train embeddings')

    if test_loader is not None:
        test_embeddings, test_targets = extract_embeddings(
            test_loader, model, use_cuda)
        writer.add_embedding(test_embeddings,
                             metadata=test_targets,
                             tag='Test embeddings')

    print('Saving model...')
    torch.save(
        model.state_dict(),
        '../weights/{}_{}.pth'.format(identification, datetime.datetime.now()))
    print('Finished')
Example #18
#%%
num = cuda.device_count()
num

#%% [markdown]
# - torch.cuda.get_device_capability(device): returns the CUDA compute capability of the device

#%%
cuda.get_device_capability(0)

#%% [markdown]
# - torch.cuda.get_device_name(device): returns the device name

#%%
cuda.get_device_name(0)

#%% [markdown]
# - torch.cuda.max_memory_allocated(device): returns the maximum GPU memory allocated by tensors on the given device

#%%
cuda.max_memory_allocated(0)

#%%
device = torch.device('cuda') if cuda.is_available() else torch.device('cpu')
X = torch.randn(100, 100, device=device)
X.shape

#%%
cuda.max_memory_allocated(0)
""" Configure seed """
config.seed = 0
""" Create scenario """
my_map = np.genfromtxt(sys.path[0] + '/example_map.csv', delimiter=',')
config.environment = DiscreteIPP(domain_type='Discrete',
                                 scenario_map=my_map,
                                 number_of_features=100,
                                 detection_area_ratio=2)

config.state_size = config.environment.reset().shape
config.action_size = config.environment.action_space.n

# Configure device
if check_if_cuda_available():
    config.device = 'cuda:0'
    config.device_name = get_device_name(0)

else:
    config.device = 'cpu'
    config.device_name = 'CPU'
""" Configure simulation conditions """
config.number_of_episodes = 3000
""" Hyperparameters """
config.hyperparameters = {
    'buffer_size': 10000,
    'batch_size': 64,
    'seed': config.seed,
    'learning_rate': 1e-4,
    'initial_epsilon': 1,
    'epsilon_decrement': 1 / config.number_of_episodes * 2,
    'epsilon_min': 0.05,
Example #20
    # Assumes: from torch.cuda import is_available, get_device_name
    def name(self):
        if is_available():
            return get_device_name(self.device)
        return 'CUDA is not available.'