Ejemplo n.º 1
0
    def __init__(self, in_features: int, out_features: int, bias: bool = True, activation="ReLU", hidden_dim=None, hidden_activation="ReLU") -> None:
        """Build a linear module: a single layer, or a small MLP when hidden_dim is given.

        Args:
            in_features: width of the input.
            out_features: width of the output.
            bias: when False, no bias parameters are registered (bias slot is None).
            activation: activation-layer name applied after the final layer.
            hidden_dim: None for a plain linear layer; otherwise an iterable of
                hidden widths, producing len(hidden_dim) + 1 weight matrices.
            hidden_activation: activation-layer name used between hidden layers.
        """
        super(Linear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.hidden_dim = hidden_dim
        self.hidden_activation = hidden_activation
        if hidden_dim is None:
            # Single layer: one (out, in) weight matrix and optionally one bias.
            # Parameters start as zeros; reset_parameters() below re-initializes them.
            self.dims = vector(in_features, out_features)
            self.weight = Parameter(torch.zeros(out_features, in_features))
            if bias:
                self.bias = Parameter(torch.zeros(out_features))
            else:
                self.register_parameter('bias', None)
            self.activation = get_activation_layer(activation)
        else:
            # Multi-layer: dims is the full chain in -> hidden... -> out.
            self.dims = vector(in_features, *vector(hidden_dim), out_features)
            # map_k(..., 2) presumably maps over consecutive (in_dim, out_dim)
            # pairs of self.dims -- TODO confirm against vector.map_k.
            self.weight = nn.ParameterList(self.dims.map_k(lambda in_dim, out_dim: Parameter(torch.zeros(out_dim, in_dim)), 2))
            if bias:
                self.bias = nn.ParameterList(self.dims.map_k(lambda in_dim, out_dim: Parameter(torch.zeros(out_dim)), 2))
            else:
                self.register_parameter('bias', None)
            # Hidden layers share hidden_activation; the final layer uses activation.
            self.activation = vector(get_activation_layer(hidden_activation) for _ in range(len(hidden_dim)))
            self.activation.append(get_activation_layer(activation))

        self.reset_parameters()
Ejemplo n.º 2
0
 def __init__(self, length, shapes, dtypes=float):
     """Normalize buffer geometry into vectors of per-variable shapes and dtypes.

     Args:
         length: number of entries the container will hold.
         shapes: a tuple (one variable), a list of shapes (several variables),
             or a bare int (one 1-D variable of that size).
         dtypes: a single type applied to every variable, or an iterable of
             per-variable types.

     Raises:
         ValueError: if an empty iterable of shapes is given.
         TypeError: if shapes is not a tuple, list or int.
     """
     self.length = length
     self.shapes = shapes
     if isinstance(shapes, Iterable) and len(shapes) == 0:
         raise ValueError
     if isinstance(shapes, tuple):
         self.num_var = 1
         self.shapes = vector([shapes])
     elif isinstance(shapes, list):
         self.num_var = len(shapes)
         self.shapes = vector(shapes).map(lambda x: totuple(x))
     elif isinstance(shapes, int):
         # Fixed: previously tested the undefined name `shape`, so any int
         # argument raised NameError instead of taking this branch.
         self.num_var = 1
         self.shapes = vector([(shapes, )])
     else:
         # Fail fast instead of leaving self.num_var undefined for the
         # dtype handling below.
         raise TypeError("shapes must be a tuple, list or int")
     if isinstance(dtypes, type):
         # One dtype given: replicate it for every variable.
         self.dtypes = vector([dtypes for _ in range(self.num_var)])
     else:
         self.dtypes = vector(dtypes)
     self.init()
Ejemplo n.º 3
0
 def __init__(self, *args, symbol="x"):
     """Create a symbolic array, either by copying an existing vector of cell
     names or by generating names of the form symbol[i,j,...] for a shape."""
     if len(args) == 1 and isinstance(args[0], vector):
         # Copy constructor: recover the symbol from the first cell name.
         source = args[0]
         self.symbol = source.flatten()[0].split("[")[0]
         self.shape = source.shape
         self.content = source.copy()
         return
     shape = totuple(args)
     self.symbol = symbol
     self.shape = shape
     # One index axis per dimension, 1-based.
     axes = vector(shape).map(lambda n: vector.range(1, n + 1))
     def name_cell(loc):
         return "{}[{}]".format(symbol, ",".join(str(l) for l in loc))
     self.content = vector.meshgrid(axes).map(name_cell).reshape(shape)
Ejemplo n.º 4
0
 def test_set(self):
     """Return the test split wrapped in a vector, building it on first access.

     The wrapper is cached under the name-mangled attribute
     _CelebA__test_set_vector so repeated accesses are cheap."""
     if not hasattr(self, "_CelebA__test_set_vector"):
         def describe(x):
             return "\n".join([
                 "Dataset CelebA", "    Number of datapoints: {}".format(
                     x.length), "    Split: Test"
             ])
         self.__test_set_vector = vector(self.__test_set, str_function=describe)
     return self.__test_set_vector
Ejemplo n.º 5
0
 def train_set(self):
     """Return the train split wrapped in a vector, building it on first access.

     The wrapper is cached under the name-mangled attribute
     _MNIST__train_set_vector so repeated accesses are cheap."""
     if not hasattr(self, "_MNIST__train_set_vector"):
         def describe(x):
             return "\n".join([
                 "Dataset MNIST", "    Number of datapoints: {}".format(
                     x.length), "    Split: Train"
             ])
         self.__train_set_vector = vector(self.__train_set, str_function=describe)
     return self.__train_set_vector
Ejemplo n.º 6
0
    def __init__(self, root="", name=None, urls=NoDefault, download=False):
        """Set up the dataset folder layout, optionally download, and unpack.

        Args:
            root: base directory; when empty, ./dataset/<name> is used.
            name: dataset name (falls back to the class-provided self.name).
            urls: source URL(s) for the raw files.
            download: when True, create raw/processed folders and download.
        """
        self._name = name
        if root:
            # Anchor the dataset folder under the caller-provided root.
            self.root = (path(root) / "dataset" / self.name).abs()
            self.root.assign_mainfolder(path(root).abs())
        else:
            self.root = path("./dataset/{}".format(self.name)).abs()
            self.root.assign_mainfolder(path(".").abs())

        self.raw_folder = self.root / "raw"
        self.processed_folder = self.root / "processed"

        # mkdir(True) presumably returns the created path and something falsy
        # when the directory already existed -- TODO confirm. Directories
        # created by this call are remembered so they can be rolled back if
        # the dataset check fails at the end.
        temp = self.root.mkdir(True)
        mk_dirs = vector()
        if temp:
            mk_dirs.append(temp)

        self.urls = vector(urls)

        if download:
            mk_dirs.extend(
                vector(self.raw_folder, self.processed_folder).map(
                    lambda x: x.mkdir(True)).filter())
            self.download()
            if not self.check(only_raw=True):
                print("Download failed")

        # Caches for the raw_files / processed_files properties.
        self._raw_files = None
        self._processed_files = None

        # A single downloaded archive gets unpacked into the processed folder.
        if bool(self.raw_files) and not bool(self.processed_files):
            if self.raw_files.length == 1:
                obj = self.raw_files[0]
                if obj.endswith("tar.gz"):
                    self.untar(obj, self.processed_folder)
                elif obj.endswith("zip"):
                    self.unzip(obj, self.processed_folder)

        if not self.check():
            print(
                "Dataset not found. You can use download=True to download it.")
            # Roll back any directories this constructor created.
            mk_dirs.apply(lambda x: x.rm())
Ejemplo n.º 7
0
def get_shape(input):
    """Return a compact string describing the nested shape of *input*.

    Lists are rendered as "L<n>[...]", tuples as "T<n>[...]" when all
    elements share a shape, otherwise as a bracketed list of element shapes.
    Tensors and ndarrays render their .shape; anything else its type name.
    """
    if isinstance(input, (list, tuple)):
        tag = "L" if isinstance(input, list) else "T"
        # The list and tuple branches were duplicated; merged via `tag`.
        l_shape = vector(input).map(get_shape)
        if len(l_shape) == 0:
            # Empty containers previously crashed on l_shape[0] (IndexError).
            return "{}0".format(tag)
        if l_shape.all(lambda x: x == l_shape[0]):
            return "{}{}".format(tag, len(l_shape)) + ("[{}]".format(l_shape[0]) if not l_shape[0].startswith("[") else l_shape[0])
        return "[{}]".format(", ".join(l_shape))
    if isinstance(input, torch.Tensor):
        return str(input.shape)
    if isinstance(input, np.ndarray):
        return str(input.shape)
    # e.g. "<class 'int'>" -> "int"
    return str(type(input))[8:-2]
Ejemplo n.º 8
0
 def processed_files(self):
     """Return the files in the processed folder, cached on first call.

     Chains of single-subdirectory wrappers (archive roots) are descended
     so the returned listing holds the actual data files."""
     cached = touch(lambda: self._processed_files, None)
     if cached:
         return cached
     if not self.processed_folder.isdir():
         self._processed_files = vector()
         return self._processed_files
     files = self.processed_folder.ls()
     while len(files) == 1 and files[0].isdir():
         files = files[0].ls()
     self._processed_files = files
     return self._processed_files
Ejemplo n.º 9
0
    def show_image(self, image, y_labels=None):
        """Plot one image, or a grid of images, in grayscale.

        Args:
            image: a 2D tensor or (1, H, W) tensor for a single image; or a
                list/tuple/3D tensor holding several images. Elements may be
                (image, label) tuples, in which case the label becomes the
                subplot title.
            y_labels: ground-truth label(s) shown in the plot title(s).

        Raises:
            RuntimeError: when asked to display more than 100 images at once.
        """
        import matplotlib.pyplot as plt
        if isinstance(image, torch.Tensor) and (image.dim() == 2
                                                or image.shape[0] == 1):
            image = image.squeeze()
            plt.imshow(image, cmap="gray", interpolation=None)
            if y_labels is not None:
                plt.title("Ground Truth: {}".format(y_labels))
            plt.xticks([])
            plt.yticks([])
            plt.show()
        elif isinstance(image, list) or isinstance(image, tuple) or isinstance(
                image, torch.Tensor) and image.dim() == 3:
            if isinstance(image, tuple):
                image = vector([image])
            # len() works for lists, vectors and tensors alike (first dim);
            # the previous code crashed on plain lists, which have no .shape.
            n = len(image)
            if n > 100:
                raise RuntimeError(
                    "{} images are displaied simutaneously".format(n))
            width = math.ceil(math.sqrt(n))
            for index in range(n):
                plt.subplot(math.ceil(n / width), width, index + 1)
                plt.tight_layout()
                if isinstance(image[index], tuple):
                    plt.imshow(image[index][0].squeeze(),
                               cmap="gray",
                               interpolation=None)
                    plt.title("Ground Truth: {}".format(image[index][1]))
                else:
                    plt.imshow(image[index].squeeze(),
                               cmap="gray",
                               interpolation=None)
                    if y_labels is not None:
                        plt.title("Ground Truth: {}".format(y_labels[index]))
                plt.xticks([])
                # Fixed: plt.yticks() with no argument only *reads* the
                # ticks; pass [] to hide them like the single-image branch.
                plt.yticks([])
            plt.show()
Ejemplo n.º 10
0
 def __init__(self, root="", name="", download=True):
     """Download the bAbI tasks archive and parse the en-10k variant.

     Builds self.rawdata, a vector of 20 RawDataSet objects (one per task,
     with the train file sorted before the test file), and self.id, a dict
     built from (task id, task name) pairs parsed from the train filenames
     -- presumably keyed by the "qa<N>" prefix; TODO confirm orientation
     against the qa<N>_<name>_<split> filename format.
     """
     self.downloader = DataDownloader(
         root=root,
         name=(name if name else "BABI"),
         urls=[
             "http://www.thespermwhale.com/jaseweston/babi/tasks_1-20_v1-2.tar.gz"
         ],
         download=download)
     # Restrict the file listing to the English 10k-examples variant.
     self.downloader._processed_files = self.downloader.processed_files.fuzzy_search(
         "en-10k").ls()
     self.rawdata = vector()
     priority = {"train": 0, "test": 1}
     for index in range(20):
         # Files of task qa<index+1>.
         # NOTE(review): the lambda captures the loop variable `index`;
         # this is safe only if vector.filter evaluates eagerly -- confirm.
         files = self.downloader.processed_files.filter(
             lambda x: "qa" + str(index + 1) == x.name.partition("_")[0])
         files = files.sort(lambda x: priority[x.name.rpartition("_")[2]])
         content = files.map(lambda x: x.readlines().vector())
         self.rawdata.append(
             RawDataSet(content, name=files[0].name.rpartition("_")[0]))
     # (task id, task name) pairs from train filenames, sorted numerically
     # by the digits after "qa", then collected into a dict.
     self.id = dict(
         self.downloader.processed_files.map(lambda x: x.name).filter(
             lambda x: "train" in x).map(lambda x: x.rpartition("_")[0]).
         map(lambda x: tuple(x.split("_"))).sort(lambda x: int(x[0][2:])))
Ejemplo n.º 11
0
def free_memory_amount(device_number):
    """Free memory in bytes on GPU `device_number`, queried via NVML."""
    nvmlInit()
    handle = nvmlDeviceGetHandleByIndex(device_number)
    return nvmlDeviceGetMemoryInfo(handle).free


def all_memory_amount(device_number):
    """Total memory in bytes of GPU `device_number`, queried via NVML."""
    nvmlInit()
    handle = nvmlDeviceGetHandleByIndex(device_number)
    return nvmlDeviceGetMemoryInfo(handle).total


# Snapshot per-GPU memory at import time, indexed like available_gpu_ids.
available_gpus_memory = vector(
    [free_memory_amount(i) for i in available_gpu_ids])
all_gpus_memory = vector([all_memory_amount(i) for i in available_gpu_ids])

# Free-memory warning threshold in GB.
# NOTE(review): eval on an environment variable executes arbitrary code;
# float(os.environ.get(...)) would be safer -- left unchanged here.
warning_free_memory_threshold = eval(os.environ.get('CUDA_RUN_MEMORY', '5'))

if torch.cuda.is_available():
    # Index of the GPU with the most free memory; max(with_index=True)
    # presumably returns (value, index) -- TODO confirm vector.max.
    most_available_gpus = available_gpus_memory.max(with_index=True)[1]

    # Warn when even the best GPU is below the threshold
    # (1.074e+9 bytes per GB, i.e. 2**30 rounded).
    if available_gpus_memory[
            most_available_gpus] < warning_free_memory_threshold * 1.074e+9:
        print("Warning: the best gpu device is device {}".format(
            most_available_gpus))
        print(
            "However, there are only {:.5} GB free memory memory in this GPU".
            format(available_gpus_memory[most_available_gpus] / 1.074e+9))
        tag = input("Do you want to proceed? [yes/no/y/n]:")
Ejemplo n.º 12
0
 def __dir__(self):
     """List attributes exactly as the base class does, but as a vector."""
     names = super().__dir__()
     return vector(names)
Ejemplo n.º 13
0
from pyctlib import vector

# Top-level entries of the torch package directory, reduced to bare module
# names (".py" suffixes stripped; note r"." is a literal dot here -- this is
# str.split, not a regex). NOTE(review): these inventories correspond to one
# specific torch release -- confirm they match the installed version.
torch_import_list = vector(["_C", "_VF.py", "__config__.py", "__future__.py", "__init__.py", "_appdirs.py", "_classes.py", "_fx", "_jit_internal.py", "_linalg_utils.py", "_lobpcg.py", "_lowrank.py", "_namedtensor_internals.py", "_ops.py", "_package", "_six.py", "_storage_docs.py", "_tensor_docs.py", "_tensor_str.py", "_torch_docs.py", "_utils.py", "_utils_internal.py", "_vmap_internals.py", "autograd", "backends", "contrib", "cuda", "distributed", "distributions", "fft", "functional.py", "futures", "hub.py", "include", "jit", "lib", "linalg", "multiprocessing", "nn", "onnx", "optim", "overrides.py", "quantization", "quasirandom.py", "random.py", "serialization.py", "share", "sparse", "storage.py", "tensor.py", "testing", "types.py", "utils", "version.py"]).map(lambda x: x.split(r".")[0])

# Names of torch dtype attributes (torch.float32, torch.int8, ...).
torch_dtype_list = vector(['bfloat16', 'bool', 'cdouble', 'cfloat', 'complex128', 'complex32', 'complex64', 'double', 'float', 'float16', 'float32', 'float64', 'half', 'int', 'int16', 'int32', 'int64', 'int8', 'long', 'qint32', 'qint8', 'quint8', 'short', 'uint8'])

# Callables exported at the top level of the torch namespace.
torch_func_list = vector(['Assert', 'Set', 'abs', 'abs_', 'absolute', 'acos', 'acos_', 'acosh', 'acosh_', 'adaptive_avg_pool1d', 'adaptive_max_pool1d', 'add', 'addbmm', 'addcdiv', 'addcmul', 'addmm', 'addmv', 'addmv_', 'addr', 'affine_grid_generator', 'align_tensors', 'all', 'allclose', 'alpha_dropout', 'alpha_dropout_', 'amax', 'amin', 'angle', 'any', 'arange', 'arccos', 'arccos_', 'arccosh', 'arccosh_', 'arcsin', 'arcsin_', 'arcsinh', 'arcsinh_', 'arctan', 'arctan_', 'arctanh', 'arctanh_', 'argmax', 'argmin', 'argsort', 'as_strided', 'as_strided_', 'as_tensor', 'asin', 'asin_', 'asinh', 'asinh_', 'atan', 'atan2', 'atan_', 'atanh', 'atanh_', 'atleast_1d', 'atleast_2d', 'atleast_3d', 'autocast_decrement_nesting', 'autocast_increment_nesting', 'avg_pool1d', 'baddbmm', 'bartlett_window', 'batch_norm', 'batch_norm_backward_elemt', 'batch_norm_backward_reduce', 'batch_norm_elemt', 'batch_norm_gather_stats', 'batch_norm_gather_stats_with_counts', 'batch_norm_stats', 'batch_norm_update_stats', 'bernoulli', 'bilinear', 'binary_cross_entropy_with_logits', 'bincount', 'binomial', 'bitwise_and', 'bitwise_not', 'bitwise_or', 'bitwise_xor', 'blackman_window', 'block_diag', 'bmm', 'broadcast_tensors', 'bucketize', 'can_cast', 'cartesian_prod', 'cat', 'cdist', 'ceil', 'ceil_', 'celu', 'celu_', 'chain_matmul', 'channel_shuffle', 'cholesky', 'cholesky_inverse', 'cholesky_solve', 'choose_qparams_optimized', 'chunk', 'clamp', 'clamp_', 'clamp_max', 'clamp_max_', 'clamp_min', 'clamp_min_', 'clear_autocast_cache', 'clip', 'clip_', 'clone', 'combinations', 'compiled_with_cxx11_abi', 'complex', 'conj', 'constant_pad_nd', 'conv1d', 'conv2d', 'conv3d', 'conv_tbc', 'conv_transpose1d', 'conv_transpose2d', 'conv_transpose3d', 'convolution', 'cos', 'cos_', 'cosh', 'cosh_', 'cosine_embedding_loss', 'cosine_similarity', 'count_nonzero', 'cross', 'ctc_loss', 'cudnn_affine_grid_generator', 'cudnn_batch_norm', 'cudnn_convolution', 'cudnn_convolution_transpose', 'cudnn_grid_sampler', 
'cudnn_is_acceptable', 'cummax', 'cummin', 'cumprod', 'cumsum', 'deg2rad', 'deg2rad_', 'dequantize', 'det', 'detach', 'detach_', 'diag', 'diag_embed', 'diagflat', 'diagonal', 'digamma', 'dist', 'div', 'divide', 'dot', 'dropout', 'dropout_', 'dsmm', 'dstack', 'eig', 'einsum', 'embedding', 'embedding_bag', 'embedding_renorm_', 'empty', 'empty_like', 'empty_meta', 'empty_quantized', 'empty_strided', 'eq', 'equal', 'erf', 'erf_', 'erfc', 'erfc_', 'erfinv', 'exp', 'exp2', 'exp2_', 'exp_', 'expm1', 'expm1_', 'eye', 'fake_quantize_per_channel_affine', 'fake_quantize_per_tensor_affine', 'fbgemm_linear_fp16_weight', 'fbgemm_linear_fp16_weight_fp32_activation', 'fbgemm_linear_int8_weight', 'fbgemm_linear_int8_weight_fp32_activation', 'fbgemm_linear_quantize_weight', 'fbgemm_pack_gemm_matrix_fp16', 'fbgemm_pack_quantized_matrix', 'feature_alpha_dropout', 'feature_alpha_dropout_', 'feature_dropout', 'feature_dropout_', 'fill_', 'fix', 'fix_', 'flatten', 'flip', 'fliplr', 'flipud', 'floor', 'floor_', 'floor_divide', 'fmod', 'fork', 'frac', 'frac_', 'frobenius_norm', 'from_file', 'from_numpy', 'full', 'full_like', 'gather', 'gcd', 'gcd_', 'ge', 'geqrf', 'ger', 'get_default_dtype', 'get_device', 'get_file_path', 'get_num_interop_threads', 'get_num_threads', 'get_rng_state', 'greater', 'greater_equal', 'grid_sampler', 'grid_sampler_2d', 'grid_sampler_3d', 'group_norm', 'gru', 'gru_cell', 'gt', 'hamming_window', 'hann_window', 'hardshrink', 'heaviside', 'hinge_embedding_loss', 'histc', 'hsmm', 'hspmm', 'hstack', 'hypot', 'i0', 'i0_', 'ifft', 'imag', 'import_ir_module', 'import_ir_module_from_buffer', 'index_add', 'index_copy', 'index_fill', 'index_put', 'index_put_', 'index_select', 'init_num_threads', 'initial_seed', 'instance_norm', 'int_repr', 'inverse', 'irfft', 'is_anomaly_enabled', 'is_autocast_enabled', 'is_complex', 'is_deterministic', 'is_distributed', 'is_floating_point', 'is_grad_enabled', 'is_nonzero', 'is_same_size', 'is_signed', 'is_storage', 'is_tensor', 
'is_vulkan_available', 'isclose', 'isfinite', 'isinf', 'isnan', 'isneginf', 'isposinf', 'isreal', 'istft', 'kaiser_window', 'kl_div', 'kthvalue', 'layer_norm', 'lcm', 'lcm_', 'le', 'lerp', 'less', 'less_equal', 'lgamma', 'linspace', 'load', 'lobpcg', 'log', 'log10', 'log10_', 'log1p', 'log1p_', 'log2', 'log2_', 'log_', 'log_softmax', 'logaddexp', 'logaddexp2', 'logcumsumexp', 'logdet', 'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'logit', 'logit_', 'logspace', 'logsumexp', 'lstm', 'lstm_cell', 'lstsq', 'lt', 'lu', 'lu_solve', 'lu_unpack', 'manual_seed', 'margin_ranking_loss', 'masked_fill', 'masked_scatter', 'masked_select', 'matmul', 'matrix_exp', 'matrix_power', 'matrix_rank', 'max', 'max_pool1d', 'max_pool1d_with_indices', 'max_pool2d', 'max_pool3d', 'maximum', 'mean', 'median', 'merge_type_from_type_comment', 'meshgrid', 'min', 'minimum', 'miopen_batch_norm', 'miopen_convolution', 'miopen_convolution_transpose', 'miopen_depthwise_convolution', 'miopen_rnn', 'mkldnn_adaptive_avg_pool2d', 'mkldnn_convolution', 'mkldnn_convolution_backward_weights', 'mkldnn_max_pool2d', 'mkldnn_max_pool3d', 'mm', 'mode', 'movedim', 'mul', 'multinomial', 'multiply', 'mv', 'mvlgamma', 'nanquantile', 'nansum', 'narrow', 'native_batch_norm', 'native_group_norm', 'native_layer_norm', 'native_norm', 'ne', 'neg', 'neg_', 'negative', 'negative_', 'nextafter', 'nonzero', 'norm', 'norm_except_dim', 'normal', 'not_equal', 'nuclear_norm', 'numel', 'ones', 'ones_like', 'orgqr', 'ormqr', 'outer', 'pairwise_distance', 'parse_ir', 'parse_schema', 'parse_type_comment', 'pca_lowrank', 'pdist', 'pinverse', 'pixel_shuffle', 'poisson', 'poisson_nll_loss', 'polar', 'polygamma', 'pow', 'prelu', 'prepare_multiprocessing_environment', 'prod', 'promote_types', 'q_per_channel_axis', 'q_per_channel_scales', 'q_per_channel_zero_points', 'q_scale', 'q_zero_point', 'qr', 'quantile', 'quantize_per_channel', 'quantize_per_tensor', 'quantized_batch_norm', 'quantized_gru', 'quantized_gru_cell', 
'quantized_lstm', 'quantized_lstm_cell', 'quantized_max_pool1d', 'quantized_max_pool2d', 'quantized_rnn_relu_cell', 'quantized_rnn_tanh_cell', 'rad2deg', 'rad2deg_', 'rand', 'rand_like', 'randint', 'randint_like', 'randn', 'randn_like', 'randperm', 'range', 'real', 'reciprocal', 'reciprocal_', 'relu', 'relu_', 'remainder', 'renorm', 'repeat_interleave', 'reshape', 'resize_as_', 'result_type', 'rfft', 'rnn_relu', 'rnn_relu_cell', 'rnn_tanh', 'rnn_tanh_cell', 'roll', 'rot90', 'round', 'round_', 'rrelu', 'rrelu_', 'rsqrt', 'rsqrt_', 'rsub', 'saddmm', 'save', 'scalar_tensor', 'scatter', 'scatter_add', 'searchsorted', 'seed', 'select', 'selu', 'selu_', 'set_anomaly_enabled', 'set_autocast_enabled', 'set_default_dtype', 'set_default_tensor_type', 'set_deterministic', 'set_flush_denormal', 'set_num_interop_threads', 'set_num_threads', 'set_printoptions', 'set_rng_state', 'sgn', 'sigmoid', 'sigmoid_', 'sign', 'signbit', 'sin', 'sin_', 'sinh', 'sinh_', 'slogdet', 'smm', 'softmax', 'solve', 'sort', 'sparse_coo_tensor', 'split', 'split_with_sizes', 'spmm', 'sqrt', 'sqrt_', 'square', 'square_', 'squeeze', 'sspaddmm', 'stack', 'std', 'std_mean', 'stft', 'sub', 'subtract', 'sum', 'svd', 'svd_lowrank', 'symeig', 't', 'take', 'tan', 'tan_', 'tanh', 'tanh_', 'tensor', 'tensordot', 'threshold', 'threshold_', 'topk', 'trace', 'transpose', 'trapz', 'triangular_solve', 'tril', 'tril_indices', 'triplet_margin_loss', 'triu', 'triu_indices', 'true_divide', 'trunc', 'trunc_', 'typename', 'unbind', 'unify_type_list', 'unique', 'unique_consecutive', 'unsafe_chunk', 'unsafe_split', 'unsafe_split_with_sizes', 'unsqueeze', 'vander', 'var', 'var_mean', 'vdot', 'view_as_complex', 'view_as_real', 'vstack', 'wait', 'where', 'zero_', 'zeros', 'zeros_like'])

# Subset of torch functions that construct new tensors.
torch_create_tensor_function_list = vector(['arange', 'as_tensor', 'bartlett_window', 'blackman_window', 'empty', 'empty_meta', 'eye', 'hamming_window', 'hann_window', 'kaiser_window', 'ones', 'rand', 'randn', 'randperm', 'scalar_tensor', 'sparse_coo_tensor', 'zeros'])

# Storage/utility classes exported at the top level of torch.
torch_type_list = vector(['BFloat16Storage', 'BoolStorage', 'ByteStorage', 'CharStorage', 'ComplexDoubleStorage', 'ComplexFloatStorage', 'DisableTorchFunction', 'DoubleStorage', 'FatalError', 'FloatStorage', 'Generator', 'HalfStorage', 'HalfStorageBase', 'IntStorage', 'JITException', 'LongStorage', 'QInt32Storage', 'QInt32StorageBase', 'QInt8Storage', 'QInt8StorageBase', 'QUInt8Storage', 'ShortStorage', 'Size', 'Storage', 'Tensor', 'device', 'dtype', 'enable_grad', 'finfo', 'iinfo', 'layout', 'memory_format', 'no_grad', 'qscheme', 'set_grad_enabled'])

# Public API of this module.
# NOTE(review): torch_create_tensor_function_list is defined above but not
# exported here -- confirm whether the omission is intentional.
__all__ = ["torch_func_list", "torch_import_list", "torch_type_list", "torch_dtype_list"]
Ejemplo n.º 14
0
def maskshow(*masks,
             on=None,
             alpha=0.5,
             nslice=None,
             dim=-1,
             stretch=False,
             **kwargs):
    """Alpha-blend colored masks over a background on the module-level canvas.

    Args:
        *masks: mask arrays/tensors; a 3D mask is split along `dim` into
            separate 2D masks.
        on: background -- a color spec (int/tuple), an array-like image, or a
            nested list; None keeps/initializes the current module canvas.
        alpha: per-mask opacity (a scalar is broadcast to all masks).
        nslice, dim: slicing arguments forwarded to to_image.
        stretch: when True, rescale masks and canvas to a common shape
            instead of requiring identical shapes.
        **kwargs: color=mask pairs (drawn with the first alpha), plus extra
            options passed through to plt.imshow.

    Returns:
        The matplotlib image returned by plt.imshow, or the imshow function
        itself when called with no masks.
    """
    global canvas
    if on is not None:
        # Background may be a color spec, an array image, or a nested list.
        if isinstance(on, (int, tuple)): background(*on)
        elif isarray(on): canvas = to_image(on, nslice, dim)
        elif isinstance(on, list): canvas = to_image(Tensor(on), nslice, dim)
        else: raise TypeError("Unrecognized argument 'on' for 'maskshow'. ")
    elif canvas is None:
        # Default background: plain white as an RGB triple.
        canvas = (1., ) * 3
    if len(masks) == 0: return imshow
    # One alpha value per mask.
    alpha = totuple(alpha, len(masks))
    new_masks = []
    new_alpha = []
    for m, a in zip(masks, alpha):
        img = to_image(m, nslice, dim)
        if img.ndim == 3:
            # Split a stacked mask into its 2D slices, one entry per slice,
            # all sharing the same alpha.
            new_masks.extend(x.squeeze(-1) for x in img.split(1, dim=dim))
            new_alpha.extend([a] * img.size(dim))
        else:
            new_masks.append(img)
            new_alpha.append(a)
    # Repeat the palette enough times that every mask receives a color.
    color_mask_map = [
        (to_RGB(c), m, a)
        for c, m, a in zip(colors * (len(new_masks) // len(colors) +
                                     1), new_masks, new_alpha)
    ]
    # Keyword arguments supply explicit color=mask pairs (first alpha).
    color_mask_map.extend((to_RGB(c), m, alpha[0]) for c, m in kwargs.items())

    if not stretch:
        # All masks (and a non-color canvas) must share a single shape.
        shapes = [m.ishape for _, m, _ in color_mask_map]
        target_shape = shapes[0]
        if len(set(shapes)) > 1 or not isinstance(
                canvas, tuple) and target_shape != canvas.shape:
            raise TypeError(
                "Please use masks of the same size as the background image, "
                "or use 'stretch=True' in 'maskshow' to automatically adjust the image sizes. "
            )
    else:

        def adjust(m, to):
            # Integer down-/up-scaling toward `to`, then crop to exact size.
            ms = tuple(m.shape)
            scaling = tuple((a // b, b // a) for a, b in zip(to, ms))
            return m.down_scale([max(v, 1) for u, v in scaling
                                 ]).up_scale([max(u, 1)
                                              for u, v in scaling]).crop_as(to)

        # Stretch everything toward the largest participating shape.
        shapes = [m.ishape for _, m, _ in color_mask_map]
        if not isinstance(canvas, tuple): shapes.append(canvas.shape[:2])
        areas = [u * v for u, v in shapes]
        target_shape = shapes[areas.index(max(areas))]
        color_mask_map = [(c, adjust(m, to=target_shape), a)
                          for c, m, a in color_mask_map]
        canvas = adjust(canvas, to=target_shape)

    # Promote the canvas to an (H, W, 3) image.
    target_shape = tp.Size(*target_shape, {3})
    if isinstance(canvas, tuple):
        canvas = tp.tensor(list(canvas)).expand_to(target_shape)
    elif canvas.ndim == 2:
        canvas = canvas.expand_to(target_shape)
    # Composite: the background keeps prod(1 - a*m) of its intensity...
    coeff = vector(1 - a * m for _, m, a in color_mask_map).prod()
    canvas *= coeff
    for i, (c, m, a) in enumerate(color_mask_map):
        # ...and each mask contributes a*m attenuated by all other masks.
        coeff = vector(a * m if j == i else 1 - a * m
                       for j, (_, m, a) in enumerate(color_mask_map)).prod()
        canvas += coeff.unsqueeze(-1) * m.unsqueeze(-1) * tp.tensor(
            list(c)).unsqueeze(0, 1)

    return plt.imshow(canvas.numpy(), **kwargs)
Ejemplo n.º 15
0
    def all_memory_amount(device_number):
        """Total memory in bytes of GPU `device_number`, queried via NVML."""
        handle = nvmlDeviceGetHandleByIndex(device_number)
        return nvmlDeviceGetMemoryInfo(handle).total

    def device_name(device_number):
        """Human-readable name of GPU `device_number`, queried via NVML."""
        handle = nvmlDeviceGetHandleByIndex(device_number)
        return nvmlDeviceGetName(handle)

    def power_usage(device_number):
        """Current power draw of GPU `device_number` as a fraction of its
        power-management limit."""
        handle = nvmlDeviceGetHandleByIndex(device_number)
        limit = nvmlDeviceGetPowerManagementLimit(handle)
        return nvmlDeviceGetPowerUsage(handle) / limit

    # Gather per-GPU statistics once, right after the helper definitions.
    available_gpus_memory = vector(
        [free_memory_amount(i) for i in available_gpu_ids])
    all_gpus_memory = vector([all_memory_amount(i) for i in available_gpu_ids])
    available_gpu_name = vector(available_gpu_ids).map(device_name)
    gpu_power_usage = vector(available_gpu_ids).map(power_usage)

    # All NVML queries are done; release the library.
    nvmlShutdown()

    # Free-memory warning threshold in GB.
    # NOTE(review): eval on an environment variable executes arbitrary code;
    # float(os.environ.get(...)) would be safer -- left unchanged here.
    warning_free_memory_threshold = eval(os.environ.get(
        'CUDA_RUN_MEMORY', '5'))
else:
    # No usable GPU: expose empty statistics under the same names.
    available_gpus_memory = vector()
    all_gpus_memory = vector()
    available_gpu_name = vector()
    gpu_power_usage = vector()
    warning_free_memory_threshold = 0