コード例 #1
0
def set_devices():
    """Choose the default CNTK compute device, preferring the GPU.

    Raises an Exception when the installed CNTK version is not 2.2.
    """
    if cn.__version__ != '2.2':
        raise Exception('Invalid CNTK Version')
    detected = device.all_devices()
    # CNTK lists GPU devices first, so the first entry tells us whether
    # any GPU is available at all.
    gpu_available = detected[0].type() == device.DeviceKind.GPU
    if gpu_available:
        print('You can use the GPU of your computer!!!')
        device.try_set_default_device(device.gpu(0))
    else:
        print('Sorry, your computer only has a slow CPU')
        device.try_set_default_device(device.cpu())
コード例 #2
0
ファイル: devices.py プロジェクト: PabloDoval/VeCNTK
def set_devices():
    """Select the default CNTK compute device, preferring the GPU.

    Raises:
        Exception: if the installed CNTK version is not exactly 2.4.
    """
    if cn.__version__ != '2.4':
        raise Exception('[ERROR]: Invalid CNTK Version')
    all_devices = device.all_devices()
    # CNTK lists GPU devices first, so checking the first entry tells us
    # whether a CUDA-capable GPU is present.
    if all_devices[0].type() == device.DeviceKind.GPU:
        # Fixed: the original adjacent string literals ('You computer' 's GPU…')
        # concatenated into "You computers GPU", dropping the intended
        # apostrophe; also corrected "suport" -> "support".
        print("[INFO]: Your computer's GPU does support CUDA acceleration")
        device.try_set_default_device(device.gpu(0))
    else:
        print("[WARNING]: Your computer's GPU does not support CUDA acceleration")
        device.try_set_default_device(device.cpu())
コード例 #3
0
# Announce that the training script for this prediction algorithm has
# started (the logged message text is intentionally in Russian).
log("СКРИПТ ОБУЧЕНИЯ " + prediction_algorithm_name + " ЗАПУЩЕН...") 

# Stopwatch: getTime() reports how long has passed since its previous call.
import time
tempTime = time.time()


def getTime():
    """Return the seconds elapsed since the last call, formatted as text.

    The number is truncated to at most five characters and suffixed with
    the Russian abbreviation for "sec.".
    """
    global tempTime
    elapsed = time.time() - tempTime
    tempTime = time.time()
    return str(elapsed)[:5] + " сек."
#####################################

# Try to make the GPU the default device so computation is accelerated.
from cntk.device import try_set_default_device, gpu
import cntk.device as C

log("Все вычислительные устройства: " + str(C.all_devices()))
try:
    gpu_was_set = C.try_set_default_device(C.gpu(0))
    log("Попытка установить GPU как устройство по умолчанию: " + str(gpu_was_set))
except Exception as e:
    # No usable GPU (or a CUDA problem) — record the reason and carry on
    # with whatever default device CNTK picked.
    log(str(e))
#log(C.use_default_device())
###################################################

#  Load the numeric / serialization / deep-learning libraries.
import numpy
import json
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout, Conv1D, GlobalAveragePooling1D, MaxPooling1D, Flatten
from keras import optimizers
#####################################################################
# Log how long the imports took (Keras/back-end start-up dominates here).
log("> время загрузки библиотек : " + getTime())  
コード例 #4
0
    communicator.barrier()

    # train in parallel
    error = cifar_resnet_distributed(data_path,
                                     load_model_filename=start_model,
                                     communicator=communicator,
                                     run_test=True,
                                     num_epochs=num_parallel_epochs)

    distributed.Communicator.finalize()
    return error


if __name__ == '__main__':
    # Count the GPU devices CNTK can see; abort when none are available,
    # since this distributed training script requires at least one GPU.
    gpu_count = sum(1 for dev in device.all_devices()
                    if dev.type() == DeviceKind_GPU)
    print("Found {} GPUs".format(gpu_count))

    if gpu_count == 0:
        print("No GPU found, exiting")
        quit()

    # Resolve the CIFAR-10 dataset directory (relative to this script,
    # portable across path separators) and make it the working directory.
    relative_parts = "../../../../Examples/Image/DataSets/CIFAR-10/".split("/")
    data_path = os.path.abspath(os.path.normpath(os.path.join(*relative_parts)))

    os.chdir(data_path)
コード例 #5
0
# Name of the streaming-prediction algorithm this script drives.
prediction_algorithm_name = 'Easy'
# Startup banner (Russian: "STREAMING PREDICTION SCRIPT ... STARTED...").
print("СКРИПТ ПОТОЧНОГО ПРОГНОЗИРОВАНИЯ " + prediction_algorithm_name +
      " ЗАПУЩЕН...")

import random
random.seed()
# Random 16-bit id used to tell this run apart from others in the logs.
session = random.getrandbits(16)
# Fixed: replaced the C-style cast "(str)(session)" with a normal str() call.
print("session = " + str(session))

# Report the CNTK devices and try to make GPU 0 the default device.
# NOTE(review): C.gpu(0) looks like it would raise on a machine with no
# GPU — confirm before relying on the fallback prints below.
from cntk.device import try_set_default_device, gpu
import cntk.device as C
print(C.all_devices())
print(C.try_set_default_device(C.gpu(0)))
print(C.use_default_device())
# Standard-library and model-loading imports used by the rest of the script.
import time
import sys
import argparse
import numpy
from datetime import datetime
from keras.models import load_model
import json


#print(sys.platform)
def createParser():
    """Build the command-line parser.

    Exposes a single optional string option, --json_file_path, which
    defaults to None when not supplied.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--json_file_path', type=str)
    return arg_parser

コード例 #6
0
    # training the start model only in one worker
    if communicator.current_worker().global_rank == 0:
        cifar_resnet_distributed(data_path, save_model_filename=start_model, communicator=None, run_test=False, num_epochs=num_start_epochs)
    
    communicator.barrier()
    
    # train in parallel
    error = cifar_resnet_distributed(data_path, load_model_filename=start_model, communicator=communicator, run_test=True, num_epochs=num_parallel_epochs)

    distributed.Communicator.finalize()
    return error

    
if __name__ == '__main__':
    # check if we have multiple-GPU, and fallback to 1 GPU if not
    devices = device.all_devices()
    gpu_count = 0
    for dev in devices:
        gpu_count += (1 if dev.type() == DeviceKind_GPU else 0)
    print("Found {} GPUs".format(gpu_count))
    
    # This distributed training script needs at least one GPU to run.
    if gpu_count == 0:
        print("No GPU found, exiting")
        quit()

    # Resolve the CIFAR-10 dataset directory shipped with the CNTK
    # Examples tree and make it the working directory.
    data_path = os.path.abspath(os.path.normpath(os.path.join(
        *"../../../../Examples/Image/DataSets/CIFAR-10/".split("/"))))

    os.chdir(data_path)
    
    # Total epoch budget; the code that consumes this appears to continue
    # past the end of this excerpt.
    total_epochs = 11