def __init__(self, gpu=0): print("start") self.root = "./images/" self.batchsize = 1 self.outdir = self.root + "out/" self.outdir_min = self.root + "out_min/" self.gpu = gpu self._dtype = np.float32 print("load model") if self.gpu >= 0: cuda.get_device(self.gpu).use() cuda.set_max_workspace_size(64 * 1024 * 1024) # 64MB chainer.Function.type_check_enable = False self.cnn_128 = unet.UNET() self.cnn_512 = unet.UNET() if self.gpu >= 0: self.cnn_128.to_gpu() self.cnn_512.to_gpu() #lnn = lnet.LNET() #serializers.load_npz("./cgi-bin/wnet/models/model_cnn_128_df_4", cnn_128) #serializers.load_npz("./cgi-bin/paint_x2_unet/models/model_cnn_128_f3_2", cnn_128) serializers.load_npz( "./cgi-bin/paint_x2_unet/models/unet_128_standard", self.cnn_128) #serializers.load_npz("./cgi-bin/paint_x2_unet/models/model_cnn_128_ua_1", self.cnn_128) #serializers.load_npz("./cgi-bin/paint_x2_unet/models/model_m_1.6", self.cnn) serializers.load_npz( "./cgi-bin/paint_x2_unet/models/unet_512_standard", self.cnn_512)
def __init__(self, gpu=0):
    current_path = os.path.dirname(__file__)
    print("start")
    two_up = os.path.abspath(os.path.join(__file__, "../../../"))
    # print(two_up)
    self.root = os.path.join(two_up, "static", "paintschainer", "images/")
    self.batchsize = 1
    self.outdir = self.root + "out/"
    self.outdir_min = self.root + "out_min/"
    self.gpu = gpu
    self._dtype = np.float32

    print("load model")
    if self.gpu >= 0:
        cuda.get_device(self.gpu).use()
        cuda.set_max_workspace_size(64 * 1024 * 1024)  # 64MB
        chainer.Function.type_check_enable = False
    self.cnn_128 = unet.UNET()
    self.cnn_512 = unet.UNET()
    if self.gpu >= 0:
        self.cnn_128.to_gpu()
        self.cnn_512.to_gpu()
    # lnn = lnet.LNET()
    # serializers.load_npz("./cgi-bin/wnet/models/model_cnn_128_df_4", cnn_128)
    # serializers.load_npz("./cgi-bin/paint_x2_unet/models/model_cnn_128_f3_2", cnn_128)
    serializers.load_npz(
        os.path.join(current_path, "models", "unet_128_standard"), self.cnn_128)
    # serializers.load_npz("./cgi-bin/paint_x2_unet/models/model_cnn_128_ua_1", self.cnn_128)
    # serializers.load_npz("./cgi-bin/paint_x2_unet/models/model_m_1.6", self.cnn)
    serializers.load_npz(
        os.path.join(current_path, "models", "unet_512_standard"), self.cnn_512)
def __init__(self, gpu=0):
    self.log_debug('Loading model...')
    self.batchsize = 1
    self.gpu = gpu
    self._dtype = np.float32
    if not os.path.isfile(
            "./genColorForAnimation/models/unet_128_standard"):
        self.log_error(
            'Failed to load model "./genColorForAnimation/models/unet_128_standard"')
        return
    if not os.path.isfile(
            "./genColorForAnimation/models/unet_512_standard"):
        self.log_error(
            'Failed to load model "./genColorForAnimation/models/unet_512_standard"')
        return
    self.log_debug('Model files found')
    if self.gpu >= 0:
        self.log_debug('Using GPU: %d' % gpu)
        cuda.get_device(self.gpu).use()
        cuda.set_max_workspace_size(64 * 1024 * 1024)  # 64MB
        chainer.Function.type_check_enable = False
    self.cnn_128 = unet.UNET()
    self.cnn_512 = unet.UNET()
    if self.gpu >= 0:
        self.cnn_128.to_gpu()
        self.cnn_512.to_gpu()
    serializers.load_npz("./genColorForAnimation/models/unet_128_standard",
                         self.cnn_128)
    serializers.load_npz("./genColorForAnimation/models/unet_512_standard",
                         self.cnn_512)
def __init__(self, gpu=0, colormode='LAB', normalized=False):
    print("start")
    self.root = "./images/"
    self.batchsize = 1
    self.outdir = self.root + "out/"
    self.outdir_min = self.root + "out_min/"
    self.gpu = gpu
    self._dtype = np.float32
    self._colormode = colormode
    self._norm = normalized

    print("load model")
    if self.gpu >= 0:
        cuda.get_device(self.gpu).use()
        cuda.set_max_workspace_size(64 * 1024 * 1024)  # 64MB
        chainer.Function.type_check_enable = False
    if self._colormode == 'YUV':
        self.cnn_128 = unet.UNET()
        # self.cnn_512 = unet.UNET()
    elif self._colormode == 'LAB':
        self.cnn_128 = unet.UNET(inputChannel=3, outputChannel=2)
        # self.cnn_512 = unet.UNET()
    if self.gpu >= 0:
        self.cnn_128.to_gpu()
        # self.cnn_512.to_gpu()
    if self._colormode == 'YUV':
        serializers.load_npz("./src/colorize/models/unet_128_standard-YUV",
                             self.cnn_128)
    elif self._colormode == 'LAB':
        serializers.load_npz("./src/colorize/models/cnn_128_iter_370000",
                             self.cnn_128)
def __init__(self, gpu=0):
    rootPath = os.path.dirname(
        os.path.abspath(
            os.path.dirname(os.path.abspath(os.path.dirname(__file__)))))
    print("start")
    self.root = str(rootPath)
    self.batchsize = 1
    self.outdir = self.root + "out/"
    self.outdir_min = self.root + "out_min/"
    self.gpu = gpu
    self._dtype = np.float32
    print(rootPath)
    if not os.path.isfile(
            str(rootPath) + "/downloads/painting_model/unet_128_standard"):
        print(
            str(rootPath) + "/downloads/painting_model/unet_128_standard "
            "not found. Please download it from "
            "http://paintschainer.preferred.tech/downloads/")
    if not os.path.isfile(
            str(rootPath) + "/downloads/painting_model/unet_512_standard"):
        print(
            str(rootPath) + "/downloads/painting_model/unet_512_standard "
            "not found. Please download it from "
            "http://paintschainer.preferred.tech/downloads/")
    print("load model")
    if self.gpu >= 0:
        cuda.get_device(self.gpu).use()
        cuda.set_max_workspace_size(64 * 1024 * 1024)  # 64MB
        chainer.Function.type_check_enable = False
    self.cnn_128 = unet.UNET()
    self.cnn_512 = unet.UNET()
    if self.gpu >= 0:
        self.cnn_128.to_device(self.gpu)
        self.cnn_512.to_device(self.gpu)
    # lnn = lnet.LNET()
    # serializers.load_npz("./cgi-bin/wnet/models/model_cnn_128_df_4", cnn_128)
    # serializers.load_npz("./cgi-bin/paint_x2_unet/models/model_cnn_128_f3_2", cnn_128)
    serializers.load_npz(
        str(rootPath) + "/downloads/painting_model/unet_128_standard",
        self.cnn_128)
    # serializers.load_npz("./cgi-bin/paint_x2_unet/models/model_cnn_128_ua_1", self.cnn_128)
    # serializers.load_npz("./cgi-bin/paint_x2_unet/models/model_m_1.6", self.cnn)
    serializers.load_npz(
        str(rootPath) + "/downloads/painting_model/unet_512_standard",
        self.cnn_512)
def __init__(self, gpu=0): print("start") self.root = "./images/" self.gpu = gpu print("load model") if self.gpu >= 0: cuda.get_device(self.gpu).use() cuda.set_max_workspace_size(64 * 1024 * 1024) # 64MB chainer.Function.type_check_enable = False self.cnn_128 = unet.UNET() self.cnn = unet.UNET() if self.gpu >= 0: self.cnn_128.to_gpu() self.cnn.to_gpu() serializers.load_npz( "./cgi-bin/paint_x2_unet/models/unet_128_standard", self.cnn_128) serializers.load_npz( "./cgi-bin/paint_x2_unet/models/unet_512_standard", self.cnn)
def __init__(self, gpu=-1):
    print("start")
    self.root = "./"
    self.batchsize = 1
    self.outdir = self.root
    self.outdir_min = self.root + "out_min/"
    self.gpu = gpu
    self._dtype = np.float32
    print("load model")
    if self.gpu >= 0:
        cuda.get_device(self.gpu).use()
        cuda.set_max_workspace_size(1024 * 1024 * 1024)  # 1 GB
        chainer.Function.type_check_enable = False
    self.cnn_128 = unet.UNET()
    self.cnn_512 = unet.UNET()
    if self.gpu >= 0:
        self.cnn_128.to_gpu()
        self.cnn_512.to_gpu()
    serializers.load_npz("./db/unet_128_standard", self.cnn_128)
    serializers.load_npz("./db/unet_512_standard", self.cnn_512)
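The seven constructors above all share one skeleton: claim the GPU, cap the cuDNN workspace, disable Chainer's type checking for speed, then deserialize pretrained U-Net weights into freshly constructed links. A minimal sketch of that shared pattern; the function name load_painter and the model_dir parameter are illustrative, and it assumes the unet module and weight files from the surrounding project are available:

    import chainer
    from chainer import cuda, serializers
    import unet

    def load_painter(gpu=0, model_dir="./models"):
        # Device setup: select the GPU, bound the cuDNN workspace, and
        # skip type checks, exactly as the constructors above do.
        if gpu >= 0:
            cuda.get_device(gpu).use()
            cuda.set_max_workspace_size(64 * 1024 * 1024)  # 64 MB
            chainer.Function.type_check_enable = False
        cnn_128 = unet.UNET()
        cnn_512 = unet.UNET()
        if gpu >= 0:
            cnn_128.to_gpu()
            cnn_512.to_gpu()
        # load_npz copies the stored parameters into the constructed links.
        serializers.load_npz(model_dir + "/unet_128_standard", cnn_128)
        serializers.load_npz(model_dir + "/unet_512_standard", cnn_512)
        return cnn_128, cnn_512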
def test_size(self):
    size = 1024
    cuda.set_max_workspace_size(size)
    self.assertEqual(size, cuda.get_max_workspace_size())
def tearDown(self):
    cuda.set_max_workspace_size(self.space)
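The two fragments above imply a setUp that snapshots the previous workspace size into self.space so each test can change it freely. A self-contained sketch of the full fixture; the setUp body is an assumption inferred from the tearDown:

    import unittest
    from chainer import cuda

    class TestWorkspaceSize(unittest.TestCase):
        def setUp(self):
            # Remember the current workspace size so tearDown can restore it.
            self.space = cuda.get_max_workspace_size()

        def tearDown(self):
            cuda.set_max_workspace_size(self.space)

        def test_size(self):
            size = 1024
            cuda.set_max_workspace_size(size)
            self.assertEqual(size, cuda.get_max_workspace_size())

    if __name__ == '__main__':
        unittest.main()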
        'measured with corresponding cache enabled. ')
parser.add_argument('--batchsize', '-b', type=int, default=32,
                    help='Batchsize')
args = parser.parse_args()
print(args)

numpy.random.seed(args.seed)
if args.gpu >= 0:
    cuda.cupy.random.seed(args.seed)

if args.gpu >= 0:
    if args.workspace_ratio < 0.0 or args.workspace_ratio > 1.0:
        raise ValueError(
            'Invalid workspace ratio:{} (valid interval:[0.0,1.0])'.format(
                args.workspace_ratio))
    _free_mem, total_mem = cuda.cupy.cuda.runtime.memGetInfo()
    size = int(total_mem * args.workspace_ratio)
    cuda.set_max_workspace_size(size)

in_channels = 3
label_num = 100
if args.predictor == 'inception-v3':
    predictor = net.inception_v3.InceptionV3(use_cudnn=args.cudnn)
    model = net.inception_v3.InceptionV3Classifier(predictor)
    in_size = 299
elif args.predictor == 'alex-owt':
    predictor = net.alex_owt.AlexOWT(use_cudnn=args.cudnn)
    model = L.Classifier(predictor)
    in_size = 224
elif args.predictor == 'vgg-d':
    predictor = net.vgg_d.VGG_D(use_cudnn=args.cudnn)
    model = L.Classifier(predictor)
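The ratio-based sizing in that example generalizes into a small reusable helper. A sketch under the same assumptions; the helper name is illustrative, and memGetInfo returns a (free, total) byte tuple for the current device:

    from chainer import cuda

    def set_workspace_from_ratio(ratio):
        # Cap the cuDNN workspace at a fraction of total device memory.
        if not 0.0 <= ratio <= 1.0:
            raise ValueError(
                'Invalid workspace ratio:{} (valid interval:[0.0,1.0])'.format(ratio))
        _free_mem, total_mem = cuda.cupy.cuda.runtime.memGetInfo()
        cuda.set_max_workspace_size(int(total_mem * ratio))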
import os
import random
import warnings

import yaml

import chainer
import chainermn
import numpy

import tgan2
from chainer import cuda
from chainer import training
from chainer.training import extensions
from tgan2.utils import make_instance

cuda.set_max_workspace_size(1024 * 1024 * 1024)
chainer.global_config.autotune = True
chainer.config.comm = None


def get_device_communicator(gpu, communicator, seed, batchsize):
    if gpu:
        if communicator == 'naive':
            print('Error: \'naive\' communicator does not support GPU.\n')
            exit(-1)
        comm = chainermn.create_communicator(communicator)
        device = comm.intra_rank
    else:
        if communicator != 'naive':
            print('Warning: using naive communicator '
                  'because only naive supports CPU-only execution')