Example #1
File: fullbatch.py Project: ajkxyz/veles
    def initialize(self, device, **kwargs):
        super(FullBatchLoader, self).initialize(device=device, **kwargs)
        assert self.total_samples > 0
        self.analyze_original_dataset()
        self._map_original_labels()

        if isinstance(self.device, NumpyDevice):
            return

        self.info("Will try to store the entire dataset on the device")
        try:
            self.init_vectors(self.original_data, self.minibatch_data)
        except CLRuntimeError as e:
            if e.code == CL_MEM_OBJECT_ALLOCATION_FAILURE:
                self.warning("Failed to store the entire dataset on the device")
                self.force_numpy = True
                self.device = NumpyDevice()
                return
            else:
                raise from_none(e)
        except CUDARuntimeError as e:
            if e.code == CUDA_ERROR_OUT_OF_MEMORY:
                self.warning("Failed to store the entire dataset on the device")
                self.force_numpy = True
                self.device = NumpyDevice()
                return
            else:
                raise from_none(e)
        if self.has_labels:
            self.init_vectors(self._mapped_original_labels_, self.minibatch_labels)

        if not self.shuffled_indices:
            self.shuffled_indices.mem = numpy.arange(self.total_samples, dtype=Loader.LABEL_DTYPE)
        self.init_vectors(self.shuffled_indices, self.minibatch_indices)
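
Note: every example on this page re-raises through from_none, veles' helper for suppressing implicit exception chaining. A minimal sketch of such a helper, assuming PEP 409 semantics (the project defines the real one in its compat module, so details may differ):

    def from_none(exc):
        """Return exc marked so that `raise from_none(exc)` behaves like
        `raise exc from None`: the exception currently being handled is
        not attached as the new exception's context."""
        exc.__cause__ = None
        exc.__suppress_context__ = True  # harmless extra attribute on Python 2
        return exc
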
Example #2
    def _generate_source(self, defines, include_dirs, dtype, suffix,
                         template_kwargs):
        if defines and not isinstance(defines, dict):
            raise RuntimeError("defines must be a dictionary")
        jsuffix = ".j" + suffix
        suffix = "." + suffix
        lines = []

        def define(cdefs, undef=False):
            for key, value in sorted(cdefs.items()):
                if not undef:
                    lines.append("#define %(key)s %(value)s\n" % locals())
                else:
                    lines.append("#undef %(key)s\n" % locals())

        my_defines = copy(defines) if defines else {}
        self._adjust_defines(my_defines, dtype)
        define(my_defines)
        for name, defs in sorted(self.sources_.items()):
            define(defs)
            if len(template_kwargs) == 0:
                # No templating
                lines.append("#include \"%s%s\"\n" % (name, suffix))
            else:
                try:
                    self._include_file(include_dirs, name + jsuffix, lines)
                except IncludeError:
                    try:
                        self._include_file(include_dirs, name + suffix, lines)
                    except IncludeError:
                        raise from_none(
                            IncludeError("Unable to include \"%s(%s|%s)\"" %
                                         (name, jsuffix, suffix)))
            define(defs, undef=True)
            lines.append("\n")
        source = "".join(lines)
        if len(template_kwargs) == 0:
            return source, my_defines
        include_re = re.compile(
            r'^\s*#\s*include\s*(<(\w+%(sfx)s)>|"(\w+%(sfx)s)")\s*$' %
            {"sfx": "\\" + jsuffix}, flags=re.MULTILINE)
        match = include_re.search(source)
        while match is not None:
            file = match.group(2) or match.group(3)
            lines = []
            self._include_file(include_dirs, file, lines)
            source = include_re.sub("\n" + "".join(lines), source, count=1)
            match = include_re.search(source)
        try:
            source = Template(source).render(**template_kwargs)
        except TemplateError as e:
            self.error(
                "Failed to render the template. Here is the source:\n%s\n",
                "".join("%04d\t%s" % (i + 1, l)
                        # enumerate the generated source itself, not the
                        # leftover buffer from the last include expansion
                        for i, l in enumerate(source.splitlines(True))))
            raise from_none(e)
        return source, my_defines
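
Note: the define closure above brackets every included file with matching #define/#undef pairs, so each file sees only its own definitions. A standalone sketch of the same emission logic (emit_defines is a hypothetical name; values are illustrative):

    def emit_defines(cdefs, undef=False):
        # Emit "#define KEY VALUE" lines, or the matching "#undef KEY" lines.
        return "".join("#undef %s\n" % key if undef
                       else "#define %s %s\n" % (key, value)
                       for key, value in sorted(cdefs.items()))

    emit_defines({"BLOCK_SIZE": 16, "USE_BIAS": 1})
    # -> '#define BLOCK_SIZE 16\n#define USE_BIAS 1\n'
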
Example #3
File: base.py Project: zghzdxs/veles
    def initialize(self, **kwargs):
        """Loads the data, initializes indices, shuffles the training set.
        """
        if self.testing:
            self.shuffle_limit = 0
            self.global_offset = 0
            del self.failed_minibatches[:]
        try:
            super(Loader, self).initialize(**kwargs)
        except AttributeError:
            pass
        try:
            self.load_data()
        except AttributeError as e:
            self.exception("Failed to load the data")
            raise from_none(e)
        if self.class_lengths[TRAIN] > 0:
            self.reset_normalization()
        self.max_minibatch_size = kwargs.get("minibatch_size",
                                             self.max_minibatch_size)
        self.on_before_create_minibatch_data()
        self._calc_class_end_offsets()
        sn_log_str = "Number of samples: test: %d, validation: %d, train: %d"
        if self.train_ratio == 1.0:
            self.info(sn_log_str, *self.class_lengths)
        else:
            self.info(
                sn_log_str + " (used: %d)",
                *(self.class_lengths + [
                    self.effective_class_end_offsets[TRAIN] -
                    self.effective_class_end_offsets[VALID]
                ]))

        self.minibatch_labels.reset(
            numpy.zeros(self.max_minibatch_size, dtype=Loader.LABEL_DTYPE
                        ) if self.has_labels else None)
        self.raw_minibatch_labels[:] = (None, ) * self.max_minibatch_size
        self.minibatch_indices.reset(
            numpy.zeros(self.max_minibatch_size, dtype=Loader.INDEX_DTYPE))

        try:
            self.create_minibatch_data()
        except Exception as e:
            self.error("Failed to create minibatch data")
            raise from_none(e)

        if not self.minibatch_data:
            raise error.BadFormatError("minibatch_data MUST be initialized in "
                                       "create_minibatch_data()")
        self.analyze_dataset()
        if self.testing:
            self.shuffled_indices.mem = None
        if not self.restored_from_snapshot or self.testing:
            self.shuffle()
Example #4
File: base.py Project: 2php/veles
    def initialize(self, **kwargs):
        """Loads the data, initializes indices, shuffles the training set.
        """
        if self.testing:
            self.shuffle_limit = 0
            self.global_offset = 0
            del self.failed_minibatches[:]
        try:
            super(Loader, self).initialize(**kwargs)
        except AttributeError:
            pass
        try:
            self.load_data()
        except AttributeError as e:
            self.exception("Failed to load the data")
            raise from_none(e)
        if self.class_lengths[TRAIN] > 0:
            self.reset_normalization()
        self.max_minibatch_size = kwargs.get("minibatch_size",
                                             self.max_minibatch_size)
        self.on_before_create_minibatch_data()
        self._calc_class_end_offsets()
        sn_log_str = "Number of samples: test: %d, validation: %d, train: %d"
        if self.train_ratio == 1.0:
            self.info(sn_log_str, *self.class_lengths)
        else:
            self.info(sn_log_str + " (used: %d)", *(self.class_lengths + [
                self.effective_class_end_offsets[TRAIN] -
                self.effective_class_end_offsets[VALID]]))

        self.minibatch_labels.reset(numpy.zeros(
            self.max_minibatch_size, dtype=Loader.LABEL_DTYPE)
            if self.has_labels else None)
        self.raw_minibatch_labels[:] = (None,) * self.max_minibatch_size
        self.minibatch_indices.reset(numpy.zeros(
            self.max_minibatch_size, dtype=Loader.INDEX_DTYPE))

        try:
            self.create_minibatch_data()
        except Exception as e:
            self.error("Failed to create minibatch data")
            raise from_none(e)

        if not self.minibatch_data:
            raise error.BadFormatError("minibatch_data MUST be initialized in "
                                       "create_minibatch_data()")
        self.analyze_dataset()
        if self.testing:
            self.shuffled_indices.mem = None
        if not self.restored_from_snapshot or self.testing:
            self.shuffle()
Example #5
 def sigint_handler(sign, frame):
     """
     Private method - handler for SIGINT.
     """
     ThreadPool.interrupted = True
     ThreadPool.shutdown_pools(execute_remaining=False, force=True)
     log = logging.getLogger("ThreadPool")
     try:
         # ThreadPool.sigint_initial(sign, frame) does not work on Python 2
         sigint_initial = ThreadPool.__dict__['sigint_initial']
         if sigint_initial == ThreadPool.sigint_handler:
             log.warning("Prevented an infinite recursion: sigint_initial")
         else:
             sigint_initial(sign, frame)
     except KeyboardInterrupt:
         if not reactor.running:
             if not ThreadPool.sigint_printed:
                 log.warning("Raising KeyboardInterrupt since "
                             "Twisted reactor is not running")
                 ThreadPool.sigint_printed = True
                 raise from_none(KeyboardInterrupt())
             ThreadPool._warn_about_sigint_hysteria(log)
         else:
             if not ThreadPool.sigint_printed:
                 log.critical("KeyboardInterrupt")
                 ThreadPool.debug_deadlocks()
                 ThreadPool.sigint_printed = True
             else:
                 if not is_interactive():
                     ThreadPool._warn_about_sigint_hysteria(log)
                 else:
                     ThreadPool._warn_about_sigint_interactive_reactor(log)
Example #6
 def __init__(self, path=None):
     if self._handle is not None:
         return
     super(libsndfile, self).__init__()
     self.debug(
         "Initializing a new instance of libsndfile class "
         "(path is %s)", path)
     if not path:
         self.info("Library path was not specified, "
                   "will use the default (libsndfile.so.1)")
         path = "libsndfile.so.1"
     self._path = path
     try:
         self.debug("Trying to load %s...", path)
         self._handle = cdll.LoadLibrary(path)
     except OSError as e:
         self.critical("Failed to load %s", path)
         raise from_none(e)
     self.debug("Success. Loading functions...")
     self._handle.sf_open.argtypes = [c_char_p, c_int, POINTER(SF_INFO)]
     self._handle.sf_open.restype = POINTER(SNDFILE)
     self._handle.sf_close.argtypes = [POINTER(SNDFILE)]
     self._handle.sf_close.restype = c_int
     self._handle.sf_readf_short.argtypes = [
         POINTER(SNDFILE), POINTER(c_short), c_int64
     ]
     self._handle.sf_readf_short.restype = c_int64
     self.debug("Finished loading functions")
Example #7
 def __init__(cls, name, bases, clsdict):
     super(BackendRegistry, cls).__init__(name, bases, clsdict)
     try:
         BackendRegistry.backends[clsdict["BACKEND"]] = cls
     except KeyError:
         raise from_none(KeyError("%s does not define BACKEND" % cls))
     assert "PRIORITY" in clsdict, "%s does not define PRIORITY" % cls
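
Note: BackendRegistry is a metaclass, so registration happens at class-creation time. A hypothetical backend registering itself (the class name and values are illustrative; veles already depends on six):

    import six

    class CUDABackend(six.with_metaclass(BackendRegistry, object)):
        BACKEND = "cuda"   # registry key; omitting it triggers the KeyError above
        PRIORITY = 10      # required by the assert

    assert BackendRegistry.backends["cuda"] is CUDABackend
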
Example #8
    def _get_some_device(self, **kwargs):
        """Gets some device from the available CUDA devices.
        Returns True if any device was selected, otherwise False.
        """
        device = self.parse_device(**kwargs)
        try:
            devices = cu.Devices()
        except (OSError, cu.CUDARuntimeError):
            devices = None
        if devices is None or not len(devices):
            raise DeviceNotFoundError("No CUDA devices were found")
        self._id = device
        if device == "":
            context = devices.create_some_context()
        else:
            try:
                device = devices[int(device)]
            except IndexError:
                raise from_none(
                    DeviceNotFoundError(
                        "CUDA device %s was not found." % device))
            context = device.create_context()
        self._context_ = context

        device = self.context.device
        self.device_info = DeviceInfo(
            desc=device.name, memsize=device.total_mem,
            memalign=4096, version=device.compute_capability,
            device_type="CUDA",
            max_work_group_size=device.max_grid_dims,
            max_work_item_sizes=device.max_block_dims,
            local_memsize=device.max_shared_memory_per_block)
        return True
Example #9
File: thread_pool.py Project: 2php/veles
 def sigint_handler(sign, frame):
     """
     Private method - handler for SIGINT.
     """
     ThreadPool.interrupted = True
     ThreadPool.shutdown_pools(execute_remaining=False, force=True)
     log = logging.getLogger("ThreadPool")
     try:
         # ThreadPool.sigint_initial(sign, frame) does not work on Python 2
         sigint_initial = ThreadPool.__dict__['sigint_initial']
         if sigint_initial == ThreadPool.sigint_handler:
             log.warning("Prevented an infinite recursion: sigint_initial")
         else:
             sigint_initial(sign, frame)
     except KeyboardInterrupt:
         if not reactor.running:
             if not ThreadPool.sigint_printed:
                 log.warning("Raising KeyboardInterrupt since "
                             "Twisted reactor is not running")
                 ThreadPool.sigint_printed = True
                 raise from_none(KeyboardInterrupt())
             ThreadPool._warn_about_sigint_hysteria(log)
         else:
             if not ThreadPool.sigint_printed:
                 log.critical("KeyboardInterrupt")
                 ThreadPool.debug_deadlocks()
                 ThreadPool.sigint_printed = True
             else:
                 if not is_interactive():
                     ThreadPool._warn_about_sigint_hysteria(log)
                 else:
                     ThreadPool._warn_about_sigint_interactive_reactor(log)
Example #10
 def __init__(cls, name, bases, clsdict):
     super(BackendRegistry, cls).__init__(name, bases, clsdict)
     try:
         BackendRegistry.backends[clsdict["BACKEND"]] = cls
     except KeyError:
         raise from_none(KeyError("%s does not define BACKEND" % cls))
     assert "PRIORITY" in clsdict, "%s does not define PRIORITY" % cls
Example #11
    def _get_some_device(self, **kwargs):
        """Gets some device from the available CUDA devices.
        Returns True if any device was selected, otherwise False.
        """
        device = self.parse_device(**kwargs)
        try:
            devices = cu.Devices()
        except (OSError, cu.CUDARuntimeError):
            devices = None
        if devices is None or not len(devices):
            raise DeviceNotFoundError("No CUDA devices were found")
        self._id = device
        if device == "":
            context = devices.create_some_context()
        else:
            try:
                device = devices[int(device)]
            except IndexError:
                raise from_none(
                    DeviceNotFoundError("CUDA device %s was not found." %
                                        device))
            context = device.create_context()
        self._context_ = context

        device = self.context.device
        self.device_info = DeviceInfo(
            desc=device.name,
            memsize=device.total_mem,
            memalign=4096,
            version=device.compute_capability,
            device_type="CUDA",
            max_work_group_size=device.max_grid_dims,
            max_work_item_sizes=device.max_block_dims,
            local_memsize=device.max_shared_memory_per_block)
        return True
Example #12
    def initialize(self, device, **kwargs):
        super(Deconv, self).initialize(device, **kwargs)

        self._dtype = self.input.dtype

        self.weights_shape = (tuple(reversed(self.weights.shape))
                              if self.weights_transposed
                              else self.weights.shape)

        if hasattr(self, "bias"):
            raise ValueError("bias should not be set")
        if (len(self.input.shape) != 4 or
                self.input.shape[3] != self.n_kernels):
            raise ValueError("Incorrectly shaped input encountered")
        if (len(self.weights_shape) != 2 or
                self.weights_shape[0] != self.n_kernels or
                self.weights_shape[1] % (self.kx * self.ky) != 0):
            raise ValueError("Incorrectly shaped weights encountered")

        output_shape = tuple(self.output_shape_source.shape)
        if len(output_shape) != 4:
            raise ValueError("Incorrect output_shape_source shape")
        if output_shape[0] != self.input.shape[0]:
            raise ValueError(
                "output_shape_source.shape[0] != input.shape[0]")

        try:
            self.check_padding_is_safe(self.kx, self.ky, self.sliding)
        except ValueError as e:
            if not self.unsafe_padding:
                raise from_none(e)
            self.warning("The padding will be unsafe")
            self._create_hits(output_shape)

        padding = Deconv.compute_padding(
            output_shape[2], output_shape[1], self.kx, self.ky, self.sliding)
        if self.padding is None:  # pylint: disable=E0203
            self.padding = padding
        elif self.padding != padding:
            if not self.unsafe_padding:
                raise ValueError(
                    "Expected padding %s but got %s" % (padding, self.padding))
            self._create_hits(output_shape)

        if self.output:
            assert self.output.shape[1:] == output_shape[1:]
        if not self.output or self.output.shape[0] != output_shape[0]:
            self.output.reset(numpy.zeros(output_shape,
                                          dtype=self._dtype))

        self._output_shape = output_shape

        self._sy, self._sx, self._n_channels = self._output_shape[1:]
        self._kernel_size = self.kx * self.ky * self._n_channels

        self._kernel_app_per_image = self.input.sample_size // self.n_kernels
        self._kernel_app_total = (self._kernel_app_per_image *
                                  self.input.shape[0])

        self.init_vectors(self.input, self.weights, self.output, self.hits)
Example #13
 def evaluate(self, chromo):
     for tune, val in zip(self.tuneables, chromo.numeric):
         tune <<= val
     chromo.config = copy.deepcopy(self.config)
     with NamedTemporaryFile(mode="wb", prefix="veles-optimization-config-",
                             suffix=".%d.pickle" % best_protocol) as fcfg:
         pickle.dump(self.config, fcfg)
         fcfg.flush()
         with NamedTemporaryFile(
                 mode="r", prefix="veles-optimization-result-",
                 suffix=".%d.pickle" % best_protocol) as fres:
             argv = ["--result-file", fres.name, "--stealth", "--log-id",
                     self.launcher.log_id] + self._filtered_argv_ + \
                 ["root.common.disable.publishing=True"]
             if self.plotters_are_disabled:
                 argv = ["-p", ""] + argv
             i = -1
             while "=" in argv[i]:
                 i -= 1
             argv[i] = fcfg.name
             result = self._exec(argv, fres)
             if result is None:
                 raise EvaluationError()
     try:
         chromo.fitness = result["EvaluationFitness"]
     except KeyError:
         raise from_none(EvaluationError(
             "Failed to find \"EvaluationFitness\" in the evaluation "
             "results"))
     chromo.snapshot = result.get("Snapshot")
     self.info("Chromosome #%d was evaluated to %f", self._chromosome_index,
               chromo.fitness)
Example #14
 def upload(self, token, metadata, reader):
     name = metadata["name"]
     version = metadata["version"]
     rep = self.repos.get(name)
     if rep is None:
         where = os.path.join(self.root, name)
         need_init = True
         if os.path.exists(where):
             self.warning("%s exists - cleared", where)
             shutil.rmtree(where)
             os.mkdir(where)
     else:
         where = dirname(rep.path)
         need_init = False
     with TarFile.open(mode="r|gz", fileobj=reader) as tar:
         tar.extractall(where)
     if not need_init:
         self.add_version(rep, version)
     else:
         self.repos[name] = rep = pygit2.init_repository(where)
         try:
             self.add_version(rep, version)
         except Exception as e:
             shutil.rmtree(where)
             del self.repos[name]
             self.error("Failed to initialize %s", name)
             raise from_none(e)
         rep.config["forge.tokens"] = self.scramble(token)
     self._generate_images(metadata, rep)
Example #15
File: memory.py Project: 2php/veles
 def max_supposed(self, value):
     try:
         1.0 + value
     except TypeError:
          raise from_none(TypeError(
              "max_value must be set to a floating point number"))
     self._max_value = value
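
Note: the `1.0 + value` probe is a duck-typing check: anything that cannot take part in float arithmetic raises TypeError and is rejected. Hypothetical usage, assuming max_supposed is wired up as a property setter:

    arr.max_supposed = 255      # accepted: ints support 1.0 + value
    arr.max_supposed = 0.5      # accepted
    arr.max_supposed = "255"    # TypeError raised via from_none(...)
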
Example #16
 def run(self, loop=True):
     forge = root.common.forge
     self.application = web.Application([
         (self.uri(forge.service_name), ServiceHandler, {"server": self}),
         (self.uri(forge.upload_name), UploadHandler, {"server": self}),
         (self.uri(forge.fetch_name), FetchHandler, {"server": self}),
         (self.uri("forge.html"), ForgeHandler, {"server": self}),
         (self.uri("image.html"), ImagePageHandler),
         (self.uri("thumbnails/(.*)"), ThumbnailHandler,
          {"path": self.root}),
         (self.uri("images/(.*)"), ImageStaticHandler, {"path": self.root}),
         (self.uri("((js|css|fonts|img|maps)/.*)"),
          web.StaticFileHandler, {'path': root.common.web.root}),
         (self.suburi, web.RedirectHandler,
          {"url": self.uri("forge.html"), "permanent": True}),
         (self.suburi[:-1], web.RedirectHandler,
          {"url": self.uri("forge.html"), "permanent": True}),
     ], template_loader=ForgeTemplateLoader(
         root.common.web.templates, root.common.forge.email_templates))
     try:
         self.application.listen(self.port)
     except OSError as e:
         self.error("Failed to open port %d", self.port)
         raise from_none(e)
     self.info("Listening on port %d, suburi %s" % (self.port, self.suburi))
     if loop:
         IOLoop.instance().start()
Example #17
 def _connectOrBind(self, endpoints):
     """
     Connect and/or bind socket to endpoints.
     """
     rnd_vals = []
     for endpoint in endpoints:
         if endpoint.type == ZmqEndpointType.connect:
             self.debug("Connecting to %s...", endpoint)
             self.socket.connect(endpoint.address)
         elif endpoint.type == ZmqEndpointType.bind:
             self.debug("Binding to %s...", endpoint)
              if endpoint.address.startswith(("rndtcp://", "rndepgm://")):
                 try:
                     endpos = endpoint.address.find("://") + 3
                     proto = endpoint.address[3:endpos]
                     splitted = endpoint.address[endpos:].split(":")
                     min_port, max_port, max_tries = splitted[-3:]
                     addr = ":".join(splitted[:-3])
                 except ValueError:
                     raise from_none(ValueError("Failed to parse %s" % endpoint.address))
                 rnd_vals.append(
                     self.socket.bind_to_random_port(proto + addr, int(min_port), int(max_port), int(max_tries))
                 )
             elif endpoint.address.startswith("rndipc://"):
                 prefix, suffix = endpoint.address[9:].split(":")
                 ipc_fd, ipc_fn = mkstemp(suffix, prefix)
                 self.socket.bind("ipc://" + ipc_fn)
                 rnd_vals.append(ipc_fn)
                 os.close(ipc_fd)
             else:
                 self.socket.bind(endpoint.address)
         else:
             assert False, "Unknown endpoint type %r" % endpoint
     return rnd_vals
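
Note: the rnd* pseudo-schemes pack a port range into the address. A hypothetical example of how the parsing above decomposes one:

    # "rndtcp://*:1024:65535:10" is taken apart as
    #   proto = "tcp://"   (address[3:endpos])
    #   addr  = "*"        min_port, max_port, max_tries = 1024, 65535, 10
    # and bound via self.socket.bind_to_random_port("tcp://*", 1024, 65535, 10);
    # the chosen port number is appended to rnd_vals.
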
Example #18
 def evaluate(self, chromo):
     for tune, val in zip(self.tuneables, chromo.numeric):
         tune <<= val
     chromo.config = copy.deepcopy(self.config)
     with NamedTemporaryFile(mode="wb",
                             prefix="veles-optimization-config-",
                             suffix=".%d.pickle" % best_protocol) as fcfg:
         pickle.dump(self.config, fcfg)
         fcfg.flush()
         with NamedTemporaryFile(mode="r",
                                 prefix="veles-optimization-result-",
                                 suffix=".%d.pickle" %
                                 best_protocol) as fres:
             argv = ["--result-file", fres.name, "--stealth", "--log-id",
                     self.launcher.log_id] + self._filtered_argv_ + \
                 ["root.common.disable.publishing=True"]
             if self.plotters_are_disabled:
                 argv = ["-p", ""] + argv
             i = -1
             while "=" in argv[i]:
                 i -= 1
             argv[i] = fcfg.name
             result = self._exec(argv, fres)
             if result is None:
                 raise EvaluationError()
     try:
         chromo.fitness = result["EvaluationFitness"]
     except KeyError:
         raise from_none(
             EvaluationError(
                 "Failed to find \"EvaluationFitness\" in the evaluation "
                 "results"))
     chromo.snapshot = result.get("Snapshot")
     self.info("Chromosome #%d was evaluated to %f", self._chromosome_index,
               chromo.fitness)
Example #19
File: memory.py Project: zghzdxs/veles
 def max_supposed(self, value):
     try:
         1.0 + value
     except TypeError:
          raise from_none(
              TypeError("max_value must be set to a floating point number"))
     self._max_value = value
Example #20
File: pickles.py Project: 2php/veles
 def load_pickles(self, index, pickles, pbar):
     unpickled = []
     for pick in pickles:
         try:
             with open(pick, "rb") as fin:
                 self.debug("Loading %s...", pick)
                 if six.PY3:
                     loaded = pickle.load(fin, encoding='charmap')
                 else:
                     loaded = pickle.load(fin)
                 unpickled.append(loaded)
                 pbar.inc()
         except Exception as e:
             self.warning(
                 "Failed to load %s (part of %s set)" %
                 (pick, CLASS_NAME[index]))
             raise from_none(e)
     data = []
     labels = []
     for obj, pick in zip(unpickled, pickles):
         if not isinstance(obj, dict):
             raise TypeError(
                 "%s has the wrong format (part of %s set)" %
                 (pick, CLASS_NAME[index]))
         try:
             data.append(obj["data"])
             labels.append(
                 numpy.array(obj["labels"], dtype=Loader.LABEL_DTYPE))
         except KeyError as e:
             self.error("%s has the wrong format (part of %s set)",
                        pick, CLASS_NAME[index])
             raise from_none(e)
     lengths = [0, sum(len(l) for l in labels)]
     for arr in data:
         lengths[0] += arr.shape[0]
         if arr.shape[1:] != data[0].shape[1:]:
              raise error.BadFormatError(
                  "Array has a different shape: expected %s, got %s "
                  "(%s set)" % (data[0].shape[1:],
                                arr.shape[1:], CLASS_NAME[index]))
     if lengths[0] != lengths[1]:
          raise error.BadFormatError(
              "Data and labels have different numbers of samples (data %d,"
              " labels %d)" % tuple(lengths))
     length = lengths[0]
     self.class_lengths[index] = length
     return length, data, labels
Example #21
File: fullbatch.py Project: ajkxyz/veles
 def _after_backend_init(self):
     try:
         self.fill_indices(0, min(self.max_minibatch_size, self.total_samples))
     except CLRuntimeError as e:
         if e.code == CL_MEM_OBJECT_ALLOCATION_FAILURE:
              self.warning("Failed to store the entire dataset on the device")
             self.force_numpy = True
             self.device = NumpyDevice()
         else:
             raise from_none(e)
     except CUDARuntimeError as e:
         if e.code == CUDA_ERROR_OUT_OF_MEMORY:
              self.warning("Failed to store the entire dataset on the device")
             self.force_numpy = True
             self.device = NumpyDevice()
         else:
             raise from_none(e)
Example #22
File: launcher.py Project: 2php/veles
 def initialize_workflow():
     try:
         self.workflow.initialize(device=self.device, **kwargs)
     except Exception as ie:
         self.error("Failed to initialize the workflow")
         self._stop_graphics()
         self.device_thread_pool_detach()
         raise from_none(ie)
Example #23
 def _import_fobj(fobj):
     try:
         return pickle.load(fobj)
     except ImportError as e:
         logging.getLogger("Snapshotter").error(
             "Are you trying to import snapshot belonging to a different "
             "workflow?")
         raise from_none(e)
Example #24
 def initialize_workflow():
     try:
         self.workflow.initialize(device=self.device, **kwargs)
     except Exception as ie:
         self.error("Failed to initialize the workflow")
         self._stop_graphics()
         self.device_thread_pool_detach()
         raise from_none(ie)
Example #25
 def _import_fobj(fobj):
     try:
         return pickle.load(fobj)
     except ImportError as e:
         logging.getLogger("Snapshotter").error(
             "Are you trying to import snapshot belonging to a different "
             "workflow?")
         raise from_none(e)
Example #26
    def __init__(self, thread_pool=None):
        if self.initialized:
            return
        self.initialized = True
        assert thread_pool is not None, (
            "GraphicsServer was not previously initialized")
        super(GraphicsServer, self).__init__()
        parser = GraphicsServer.init_parser()
        args, _ = parser.parse_known_args(self.argv)
        self._debug_pickle = args.graphics_pickle_debug
        zmq_endpoints = [
            ZmqEndpoint("bind", "inproc://veles-plots"),
            ZmqEndpoint("bind", "rndipc://veles-ipc-plots-:")
        ]
        ifaces = []
        for iface, _ in interfaces():
            if iface in root.common.graphics.blacklisted_ifaces:
                continue
            ifaces.append(iface)
            zmq_endpoints.append(
                ZmqEndpoint(
                    "bind", "rndepgm://%s;%s:1024:65535:1" %
                    (iface, root.common.graphics.multicast_address)))
        self.debug("Trying to bind to %s...", zmq_endpoints)

        try:
            self.zmq_connection, btime = timeit(ZmqPublisher, zmq_endpoints)
        except zmq.error.ZMQError:
            self.exception("Failed to bind to %s", zmq_endpoints)
            raise from_none(GraphicsServer.InitializationError())

        # Important! Save the bound method to variable to avoid dead weak refs
        # See http://stackoverflow.com/questions/19443440/weak-reference-to-python-class-method  # nopep8
        self._shutdown_ = self.shutdown
        thread_pool.register_on_shutdown(self._shutdown_)

        # tmpfn, *ports = self.zmq_connection.rnd_vals
        tmpfn = self.zmq_connection.rnd_vals[0]
        ports = self.zmq_connection.rnd_vals[1:]

        self.endpoints = {
            "inproc": "inproc://veles-plots",
            "ipc": "ipc://" + tmpfn,
            "epgm": []
        }
        for port, iface in zip(ports, ifaces):
            self.endpoints["epgm"].append(
                "epgm://%s;%s:%d" %
                (iface, root.common.graphics.multicast_address, port))
        self.info(
            "Publishing to %s",
            "; ".join([self.endpoints["inproc"], self.endpoints["ipc"]] +
                      self.endpoints["epgm"]))
        if btime > 1:
            self.warning(
                "EPGM bind took %d seconds - consider adding offending "
                "interfaces to root.common.graphics.blacklisted_ifaces or "
                "completely disabling graphics (-p '').", int(btime))
Example #27
File: pickles.py Project: zghzdxs/veles
 def load_pickles(self, index, pickles, pbar):
     unpickled = []
     for pick in pickles:
         try:
             with open(pick, "rb") as fin:
                 self.debug("Loading %s...", pick)
                 if six.PY3:
                     loaded = pickle.load(fin, encoding='charmap')
                 else:
                     loaded = pickle.load(fin)
                 unpickled.append(loaded)
                 pbar.inc()
         except Exception as e:
             self.warning("Failed to load %s (part of %s set)" %
                          (pick, CLASS_NAME[index]))
             raise from_none(e)
     data = []
     labels = []
     for obj, pick in zip(unpickled, pickles):
         if not isinstance(obj, dict):
             raise TypeError("%s has the wrong format (part of %s set)" %
                             (pick, CLASS_NAME[index]))
         try:
             data.append(obj["data"])
             labels.append(
                 numpy.array(obj["labels"], dtype=Loader.LABEL_DTYPE))
         except KeyError as e:
             self.error("%s has the wrong format (part of %s set)", pick,
                        CLASS_NAME[index])
             raise from_none(e)
     lengths = [0, sum(len(l) for l in labels)]
     for arr in data:
         lengths[0] += arr.shape[0]
         if arr.shape[1:] != data[0].shape[1:]:
              raise error.BadFormatError(
                  "Array has a different shape: expected %s, got %s "
                  "(%s set)" %
                  (data[0].shape[1:], arr.shape[1:], CLASS_NAME[index]))
     if lengths[0] != lengths[1]:
          raise error.BadFormatError(
              "Data and labels have different numbers of samples (data %d,"
              " labels %d)" % tuple(lengths))
     length = lengths[0]
     self.class_lengths[index] = length
     return length, data, labels
Example #28
File: verified.py Project: 2php/veles
 def verify_interface(self, iface):
     if getattr(type(self), "DISABLE_INTERFACE_VERIFICATION", False):
         return
     if not iface.providedBy(self):
         raise NotImplementedError(
             "Unit %s does not implement %s interface"
             % (repr(self), iface.__name__))
     try:
         verifyObject(iface, self)
     except Exception as e:
         self.error("%s does not pass verifyObject(%s)", self, iface)
         raise from_none(e)
     try:
         verifyClass(iface, self.__class__)
     except Exception as e:
         self.error("%s does not pass verifyClass(%s)",
                    self.__class__, iface)
         raise from_none(e)
Example #29
    def initialize(self, device, **kwargs):
        super(Deconv, self).initialize(device, **kwargs)

        self._dtype = self.input.dtype

        self.weights_shape = (tuple(reversed(self.weights.shape)) if
                              self.weights_transposed else self.weights.shape)

        if hasattr(self, "bias"):
            raise ValueError("bias should not be set")
        if (len(self.input.shape) != 4
                or self.input.shape[3] != self.n_kernels):
            raise ValueError("Incorrectly shaped input encountered")
        if (len(self.weights_shape) != 2
                or self.weights_shape[0] != self.n_kernels
                or self.weights_shape[1] % (self.kx * self.ky) != 0):
            raise ValueError("Incorrectly shaped weights encountered")

        output_shape = tuple(self.output_shape_source.shape)
        if len(output_shape) != 4:
            raise ValueError("Incorrect output_shape_source shape")
        if output_shape[0] != self.input.shape[0]:
            raise ValueError("output_shape_source.shape[0] != input.shape[0]")

        try:
            self.check_padding_is_safe(self.kx, self.ky, self.sliding)
        except ValueError as e:
            if not self.unsafe_padding:
                raise from_none(e)
            self.warning("The padding will be unsafe")
            self._create_hits(output_shape)

        padding = Deconv.compute_padding(output_shape[2], output_shape[1],
                                         self.kx, self.ky, self.sliding)
        if self.padding is None:  # pylint: disable=E0203
            self.padding = padding
        elif self.padding != padding:
            if not self.unsafe_padding:
                raise ValueError("Expected padding %s but got %s" %
                                 (padding, self.padding))
            self._create_hits(output_shape)

        if not self.output:
            self.output.reset(numpy.zeros(output_shape, dtype=self._dtype))
        else:
            assert self.output.shape == output_shape

        self._output_shape = output_shape

        self._sy, self._sx, self._n_channels = self._output_shape[1:]
        self._kernel_size = self.kx * self.ky * self._n_channels

        self._kernel_app_per_image = self.input.sample_size // self.n_kernels
        self._kernel_app_total = (self._kernel_app_per_image *
                                  self.input.shape[0])

        self.init_vectors(self.input, self.weights, self.output, self.hits)
Example #30
 def verify_interface(self, iface):
     if getattr(type(self), "DISABLE_INTERFACE_VERIFICATION", False):
         return
     if not iface.providedBy(self):
         raise NotImplementedError(
             "Unit %s does not implement %s interface" %
             (repr(self), iface.__name__))
     try:
         verifyObject(iface, self)
     except Exception as e:
         self.error("%s does not pass verifyObject(%s)", self, iface)
         raise from_none(e)
     try:
         verifyClass(iface, self.__class__)
     except Exception as e:
         self.error("%s does not pass verifyClass(%s)", self.__class__,
                    iface)
         raise from_none(e)
Example #31
    def ocl_build_program(self, defines, cache_file_name, dtype,
                          template_kwargs):
        """Builds the OpenCL program.

        `program_` will be initialized to the resulting program object.
        """
        def cache_is_valid(cache):
            dev = self.device.queue_.device
            return any((dev.name, dev.platform.name, dev.driver_version) == did
                       for did in cache["devices"])

        binaries, my_defines = self._load_binary(defines, cache_file_name,
                                                 dtype, OCL, OCLS,
                                                 cache_is_valid,
                                                 template_kwargs)
        if binaries is not None:
            self.program_ = self.device.queue_.context.create_program(
                binaries, binary=True)
            self._log_about_cache(cache_file_name, OCL)
            return my_defines
        include_dirs = self._get_include_dirs(OCL)
        source, my_defines = self._generate_source(defines, include_dirs,
                                                   dtype, OCLS,
                                                   template_kwargs)
        show_logs = self.logger.isEnabledFor(logging.DEBUG)
        if show_logs:
            self.debug("%s: source code\n%s\n%s", cache_file_name, "-" * 80,
                       source)
        try:
            self.program_ = self.device.queue_.context.create_program(
                source, include_dirs,
                "-cl-nv-verbose" if show_logs and "cl_nv_compiler_options"
                in self.device.queue_.device.extensions else "")
        except Exception as e:
            with NamedTemporaryFile(mode="w",
                                    prefix="ocl_src_",
                                    suffix="." + OCLS,
                                    delete=False) as fout:
                fout.write(source)
                self.error(
                    "Failed to build OpenCL program. The input file "
                    "source was dumped to %s", fout.name)
            raise from_none(e)
        if show_logs and len(self.program_.build_logs):
            for s in self.program_.build_logs:
                s = s.strip()
                if not s:
                    continue
                self.debug("Non-empty OpenCL build log encountered: %s", s)
        self._save_to_cache(
            cache_file_name, OCLS, self.program_.source,
            self.program_.binaries, {
                "devices": [(d.name, d.platform.name, d.driver_version)
                            for d in self.program_.devices]
            })
        return my_defines
Example #32
 def initialize(self, **kwargs):
     """Initializes all the units belonging to this Workflow, in dependency
     order.
     """
     try:
         snapshot = kwargs["snapshot"]
     except KeyError:
         raise from_none(
             KeyError(
                 "\"snapshot\" (True/False) must be provided in kwargs"))
     units_number = len(self)
     fin_text = "%d units were initialized" % units_number
     maxlen = max([len(u.name) for u in self] + [len(fin_text)])
     if not self.is_standalone:
         self.verify_interface(IDistributable)
     progress = ProgressBar(
         maxval=units_number,
         term_width=min(80,
                        len(self) + 8 + maxlen),
         widgets=[Percentage(), ' ',
                  Bar(), ' ', ' ' * maxlen],
         poll=0)
     progress.widgets[0].TIME_SENSITIVE = True
     self.info("Initializing units in %s...", self.name)
     progress.start()
     units_in_dependency_order = list(self.units_in_dependency_order)
     iqueue = list(units_in_dependency_order)
     while len(iqueue) > 0:
         unit = iqueue.pop(0)
         # Early abort in case of KeyboardInterrupt
         if self.thread_pool.joined:
             break
         progress.widgets[-1] = unit.name + ' ' * (maxlen - len(unit.name))
         progress.update()
         if not self.is_standalone:
             unit.verify_interface(IDistributable)
         try:
             partially = unit.initialize(**kwargs)
         except:
             self.error("Unit \"%s\" failed to initialize", unit.name)
             raise
         if partially:
             iqueue.append(unit)
         else:
             if snapshot and not unit._remembers_gates:
                 unit.close_gate()
                 unit.close_upstream()
             progress.inc()
     progress.widgets[-1] = fin_text + ' ' * (maxlen - len(fin_text))
     progress.finish()
     initialized_units_number = len(units_in_dependency_order)
     if initialized_units_number < units_number:
         self.warning("Not all units were initialized (%d left): %s",
                      units_number - initialized_units_number,
                      set(self) - set(units_in_dependency_order))
Example #33
File: logger.py Project: zghzdxs/veles
 def __init__(self, **kwargs):
     self._logger_ = kwargs.get("logger",
                                logging.getLogger(self.__class__.__name__))
     try:
         super(Logger, self).__init__()
     except TypeError as e:
         mro = type(self).__mro__
         mro.index(Logger)
         self.error("Failed to call __init__ in super() = %s",
                    mro[mro.index(Logger) + 1])
         raise from_none(e)
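
Note: the bare mro.index(Logger) call doubles as a guard (it raises ValueError early if Logger is somehow absent from the MRO) before the index is reused to name the class whose __init__ actually failed. Illustration with a hypothetical hierarchy:

    # class C(Logger, Base): ...
    # type(self).__mro__ == (C, Logger, Base, object)
    # mro[mro.index(Logger) + 1] is Base -> the super() __init__ that raised
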
Example #34
 def interval(self, value):
     try:
         vmin, vmax = value
     except (TypeError, ValueError):
         raise from_none(ValueError("interval must consist of two values"))
     for v in vmin, vmax:
         if not isinstance(v, (int, float)):
             raise TypeError(
                 "Each value in the interval must be either an int or a "
                 "float (got %s of %s)" % (v, v.__class__))
     self._interval = float(vmin), float(vmax)
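
Note: hypothetical assignments showing the two failure modes the setter distinguishes:

    unit.interval = (0, 255)    # stored as (0.0, 255.0)
    unit.interval = 42          # ValueError: interval must consist of two values
    unit.interval = (0, "1")    # TypeError: each value must be an int or a float
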
Example #35
 def execute_kernel(self, global_size, local_size, kernel=None,
                    need_event=False):
     try:
         return self._backend_execute_kernel_(
             kernel or self._kernel_, global_size, local_size,
             need_event=need_event)
     except RuntimeError as e:
         self.error("execute_kernel(%s) has failed. global_size = %s, "
                    "local_size = %s", str(kernel or self._kernel_),
                    str(global_size), str(local_size))
         raise from_none(e)
Example #36
File: logger.py Project: 2php/veles
 def __init__(self, **kwargs):
     self._logger_ = kwargs.get(
         "logger", logging.getLogger(self.__class__.__name__))
     try:
         super(Logger, self).__init__()
     except TypeError as e:
         mro = type(self).__mro__
         mro.index(Logger)
         self.error("Failed to call __init__ in super() = %s",
                    mro[mro.index(Logger) + 1])
         raise from_none(e)
Example #37
File: fullbatch.py Project: ajkxyz/veles
 def _after_backend_init(self):
     try:
         self.fill_indices(0,
                           min(self.max_minibatch_size, self.total_samples))
     except CLRuntimeError as e:
         if e.code == CL_MEM_OBJECT_ALLOCATION_FAILURE:
             self.warning("Failed to store the entire dataset on the "
                          "device")
             self.force_numpy = True
             self.device = NumpyDevice()
         else:
             raise from_none(e)
     except CUDARuntimeError as e:
         if e.code == CUDA_ERROR_OUT_OF_MEMORY:
             self.warning("Failed to store the entire dataset on the "
                          "device")
             self.force_numpy = True
             self.device = NumpyDevice()
         else:
             raise from_none(e)
Example #38
 def interval(self, value):
     try:
         vmin, vmax = value
     except (TypeError, ValueError):
         raise from_none(ValueError("interval must consist of two values"))
     for v in vmin, vmax:
         if not isinstance(v, (int, float)):
             raise TypeError(
                 "Each value in the interval must be either an int or a "
                 "float (got %s of %s)" % (v, v.__class__))
     self._interval = float(vmin), float(vmax)
Example #39
    def __init__(self, thread_pool=None):
        if self.initialized:
            return
        self.initialized = True
        assert thread_pool is not None, (
            "GraphicsServer was not previously initialized")
        super(GraphicsServer, self).__init__()
        parser = GraphicsServer.init_parser()
        args, _ = parser.parse_known_args(self.argv)
        self._debug_pickle = args.graphics_pickle_debug
        zmq_endpoints = [ZmqEndpoint("bind", "inproc://veles-plots"),
                         ZmqEndpoint("bind", "rndipc://veles-ipc-plots-:")]
        ifaces = []
        for iface, _ in interfaces():
            if iface in root.common.graphics.blacklisted_ifaces:
                continue
            ifaces.append(iface)
            zmq_endpoints.append(ZmqEndpoint(
                "bind", "rndepgm://%s;%s:1024:65535:1" %
                        (iface, root.common.graphics.multicast_address)))
        self.debug("Trying to bind to %s...", zmq_endpoints)

        try:
            self.zmq_connection, btime = timeit(ZmqPublisher, zmq_endpoints)
        except zmq.error.ZMQError:
            self.exception("Failed to bind to %s", zmq_endpoints)
            raise from_none(GraphicsServer.InitializationError())

        # Important! Save the bound method to variable to avoid dead weak refs
        # See http://stackoverflow.com/questions/19443440/weak-reference-to-python-class-method  # nopep8
        self._shutdown_ = self.shutdown
        thread_pool.register_on_shutdown(self._shutdown_)

        # tmpfn, *ports = self.zmq_connection.rnd_vals
        tmpfn = self.zmq_connection.rnd_vals[0]
        ports = self.zmq_connection.rnd_vals[1:]

        self.endpoints = {"inproc": "inproc://veles-plots",
                          "ipc": "ipc://" + tmpfn,
                          "epgm": []}
        for port, iface in zip(ports, ifaces):
            self.endpoints["epgm"].append(
                "epgm://%s;%s:%d" %
                (iface, root.common.graphics.multicast_address, port))
        self.info("Publishing to %s", "; ".join([self.endpoints["inproc"],
                                                 self.endpoints["ipc"]] +
                                                self.endpoints["epgm"]))
        if btime > 1:
            self.warning(
                "EPGM bind took %d seconds - consider adding offending "
                "interfaces to root.common.graphics.blacklisted_ifaces or "
                "completely disabling graphics (-p '').",
                int(btime))
Example #40
File: __main__.py Project: zghzdxs/veles
 def _parse_optimization(self, args):
     if args.optimize is None:
         return
     optparsed = args.optimize.split(':')
     if len(optparsed) > 2:
         raise ValueError("Invalid --optimize value: %s" % args.optimize)
     try:
         self.optimization = int(optparsed[0]), \
             int(optparsed[1]) if len(optparsed) == 2 else None
     except ValueError:
         raise from_none(ValueError(
             "\"%s\" is not a valid --optimize value" % args.optimize))
Example #41
 def _parse_optimization(self, args):
     if args.optimize is None:
         return
     optparsed = args.optimize.split(':')
     if len(optparsed) > 2:
         raise ValueError("Invalid --optimize value: %s" % args.optimize)
     try:
         self.optimization = int(optparsed[0]), \
             int(optparsed[1]) if len(optparsed) == 2 else None
     except ValueError:
         raise from_none(ValueError(
             "\"%s\" is not a valid --optimize value" % args.optimize))
Example #42
 def map_minibatch_labels(self):
     if not self.has_labels:
         return
     self.minibatch_labels.map_write()
     for i, l in enumerate(self.raw_minibatch_labels[:self.minibatch_size]):
         try:
             self.minibatch_labels[i] = self.labels_mapping[l]
         except KeyError as e:
             if i == 0 and l is None:
                 self.error(
                     "Looks like you forgot to fill raw_minibatch_labels "
                     "inside fill_minibatch()")
             raise from_none(e)
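
Note: labels_mapping translates raw (possibly non-numeric) labels into dense integer indices. A hypothetical mapping makes the loop concrete:

    # labels_mapping       == {"cat": 0, "dog": 1}
    # raw_minibatch_labels == ["dog", "cat", "dog", ...]
    # minibatch_labels     ->  [1, 0, 1, ...]; an unmapped label raises KeyError,
    # and the i == 0 / l is None hint fires when fill_minibatch() left the raw
    # labels untouched.
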
Example #43
 def _link_attr(self, other, mine, yours, two_way):
     if isinstance(other, Container) and not hasattr(other, yours):
         setattr(other, yours, False)
     try:
         attr = getattr(other, yours)
     except AttributeError as e:
         self.error("Unable to link %s.%s to %s.%s",
                    other, yours, self, mine)
         raise from_none(e)
     if Unit.is_immutable(attr):
         LinkableAttribute(self, mine, (other, yours), two_way=two_way)
     else:
         setattr(self, mine, attr)
Example #44
File: base.py Project: 2php/veles
 def map_minibatch_labels(self):
     if not self.has_labels:
         return
     self.minibatch_labels.map_write()
     for i, l in enumerate(self.raw_minibatch_labels[:self.minibatch_size]):
         try:
             self.minibatch_labels[i] = self.labels_mapping[l]
         except KeyError as e:
             if i == 0 and l is None:
                 self.error(
                     "Looks like you forgot to fill raw_minibatch_labels "
                     "inside fill_minibatch()")
             raise from_none(e)
Example #45
 def initialize(self, **kwargs):
     """Initializes all the units belonging to this Workflow, in dependency
     order.
     """
     try:
         snapshot = kwargs["snapshot"]
     except KeyError:
         raise from_none(KeyError(
             "\"snapshot\" (True/False) must be provided in kwargs"))
     units_number = len(self)
     fin_text = "%d units were initialized" % units_number
     maxlen = max([len(u.name) for u in self] + [len(fin_text)])
     if not self.is_standalone:
         self.verify_interface(IDistributable)
     progress = ProgressBar(maxval=units_number,
                            term_width=min(80, len(self) + 8 + maxlen),
                            widgets=[Percentage(), ' ', Bar(), ' ',
                                     ' ' * maxlen], poll=0)
     progress.widgets[0].TIME_SENSITIVE = True
     self.info("Initializing units in %s...", self.name)
     progress.start()
     units_in_dependency_order = list(self.units_in_dependency_order)
     iqueue = list(units_in_dependency_order)
     while len(iqueue) > 0:
         unit = iqueue.pop(0)
         # Early abort in case of KeyboardInterrupt
         if self.thread_pool.joined:
             break
         progress.widgets[-1] = unit.name + ' ' * (maxlen - len(unit.name))
         progress.update()
         if not self.is_standalone:
             unit.verify_interface(IDistributable)
         try:
             partially = unit.initialize(**kwargs)
         except:
             self.error("Unit \"%s\" failed to initialize", unit.name)
             raise
         if partially:
             iqueue.append(unit)
         else:
             if snapshot and not unit._remembers_gates:
                 unit.close_gate()
                 unit.close_upstream()
             progress.inc()
     progress.widgets[-1] = fin_text + ' ' * (maxlen - len(fin_text))
     progress.finish()
     initialized_units_number = len(units_in_dependency_order)
     if initialized_units_number < units_number:
         self.warning("Not all units were initialized (%d left): %s",
                      units_number - initialized_units_number,
                      set(self) - set(units_in_dependency_order))
Example #46
    def ocl_build_program(self, defines, cache_file_name, dtype,
                          template_kwargs):
        """Builds the OpenCL program.

        `program_` will be initialized to the resulting program object.
        """

        def cache_is_valid(cache):
            return (self.device.queue_.device.name ==
                    cache["devices"][0][0] and
                    self.device.queue_.device.platform.name ==
                    cache["devices"][0][1])

        binaries, my_defines = self._load_binary(
            defines, cache_file_name, dtype, "ocl", "cl", cache_is_valid,
            template_kwargs)
        if binaries is not None:
            self.program_ = self.device.queue_.context.create_program(
                binaries, binary=True)
            self._log_about_cache(cache_file_name, "ocl")
            return my_defines
        include_dirs = self._get_include_dirs("ocl")
        source, my_defines = self._generate_source(
            defines, include_dirs, dtype, "cl", template_kwargs)
        show_logs = self.logger.isEnabledFor(logging.DEBUG)
        if show_logs:
            self.debug("%s: source code\n%s\n%s", cache_file_name, "-" * 80,
                       source)
        try:
            self.program_ = self.device.queue_.context.create_program(
                source, include_dirs,
                "-cl-nv-verbose" if show_logs and "cl_nv_compiler_options"
                in self.device.queue_.device.extensions else "")
        except Exception as e:
            with NamedTemporaryFile(mode="w", prefix="ocl_src_", suffix=".cl",
                                    delete=False) as fout:
                fout.write(source)
                self.error("Failed to build OpenCL program. The input file "
                           "source was dumped to %s", fout.name)
            raise from_none(e)
        if show_logs and len(self.program_.build_logs):
            for s in self.program_.build_logs:
                s = s.strip()
                if not s:
                    continue
                self.debug("Non-empty OpenCL build log encountered: %s", s)
        self._save_to_cache(cache_file_name, "cl", self.program_.source,
                            self.program_.binaries,
                            {"devices": [(d.name, d.platform.name)
                                         for d in self.program_.devices]})
        return my_defines
Example #47
File: fullbatch.py Project: ajkxyz/veles
    def initialize(self, device, **kwargs):
        super(FullBatchLoader, self).initialize(device=device, **kwargs)
        assert self.total_samples > 0
        self.analyze_original_dataset()
        self._map_original_labels()

        if isinstance(self.device, NumpyDevice):
            return

        self.info("Will try to store the entire dataset on the device")
        try:
            self.init_vectors(self.original_data, self.minibatch_data)
        except CLRuntimeError as e:
            if e.code == CL_MEM_OBJECT_ALLOCATION_FAILURE:
                self.warning("Failed to store the entire dataset on the "
                             "device")
                self.force_numpy = True
                self.device = NumpyDevice()
                return
            else:
                raise from_none(e)
        except CUDARuntimeError as e:
            if e.code == CUDA_ERROR_OUT_OF_MEMORY:
                self.warning("Failed to store the entire dataset on the "
                             "device")
                self.force_numpy = True
                self.device = NumpyDevice()
                return
            else:
                raise from_none(e)
        if self.has_labels:
            self.init_vectors(self._mapped_original_labels_,
                              self.minibatch_labels)

        if not self.shuffled_indices:
            self.shuffled_indices.mem = numpy.arange(self.total_samples,
                                                     dtype=Loader.LABEL_DTYPE)
        self.init_vectors(self.shuffled_indices, self.minibatch_indices)
Example #48
    def _parse_ensemble_train(self, args):
        if args.ensemble_train is None:
            return

        optparsed = args.ensemble_train.split(":")
        if len(optparsed) != 2:
            raise ValueError("--ensemble-train must be specified as"
                             "<number of instances>:<training set ratio>")
        try:
            self.ensemble_train = int(optparsed[0]), float(optparsed[1])
        except ValueError:
            raise from_none(ValueError(
                "Failed to parse ensemble parameters from (%s, %s)" %
                tuple(optparsed)))
Example #49
    def initialize(self, device, **kwargs):
        super(GDDeconv, self).initialize(device, **kwargs)

        if self.bias is not None:
            raise ValueError("bias should not be set")
        if (len(self.weights_shape) != 2 or
                self.weights_shape[0] != self.n_kernels or
                self.weights_shape[1] % (self.kx * self.ky) != 0):
            raise ValueError(
                "Incorrectly shaped weights encountered")
        if (len(self.input.shape) != 4 or
                self.input.shape[3] != self.n_kernels):
            raise ValueError(
                "Incorrectly shaped input encountered")
        if (len(self.err_output.shape) != 4 or
                self.err_output.shape[0] != self.input.shape[0]):
            raise ValueError(
                "Incorrectly shaped err_output encountered")

        sy, sx = self.ky_kx

        if self.weights.size != self.weights_number:
            raise ValueError(
                "Expected number of weights to match "
                "input, n_kernels, kx, ky parameters")

        try:
            Deconv.check_padding_is_safe(self.kx, self.ky, self.sliding)
        except ValueError as e:
            if not self.hits:
                raise from_none(e)
            self.warning("The padding will be unsafe")
        padding = Deconv.compute_padding(
            sx, sy, self.kx, self.ky, self.sliding)
        if self.padding is None:  # pylint: disable=E0203
            self.padding = padding
        elif self.padding != padding and not self.unsafe_padding:
            raise ValueError(
                "Expected padding %s got %s"
                % (str(padding), str(self.padding)))
        if self.hits:
            self.hits.initialize(self.device)

        self._dtype = self.err_output.dtype

        self._batch_size = self.err_output.shape[0]
        self._kernel_app_per_image = self.input.sample_size // self.n_kernels
        self._kernel_app_total = (self._kernel_app_per_image *
                                  self.input.shape[0])
        self._kernel_size = self.kx * self.ky * self.channels_number
Example #51
File: image.py Project: EgBulychev/veles
    def preprocess_image(self, data, color, crop, bbox):
        """
        Transforms images before serving.
        :param data: the loaded image data.
        :param color: The loaded image color space.
        :param crop: True if must crop the scaled image; otherwise, False.
        :param bbox: The bounding box of the labeled object. Tuple
        (ymin, ymax, xmin, xmax).
        :return: The transformed image data, the label value (from 0 to 1).
        """
        if color != self.color_space:
            method = getattr(
                cv2, "COLOR_%s2%s" % (color, self.color_space), None)
            if method is None:
                aux_method = getattr(cv2, "COLOR_%s2BGR" % color)
                try:
                    data = cv2.cvtColor(data, aux_method)
                except cv2.error as e:
                    self.error("Failed to perform '%s' conversion", aux_method)
                    raise from_none(e)
                method = getattr(cv2, "COLOR_BGR2%s" % self.color_space)
            try:
                data = cv2.cvtColor(data, method)
            except cv2.error as e:
                self.error("Failed to perform '%s' conversion", method)
                raise from_none(e)

        if self.add_sobel:
            data = self.add_sobel_channel(data)
        if self.scale != 1.0:
            data, bbox = self.scale_image(data, bbox)
        if crop and self.crop is not None:
            data, label_value = self.crop_image(data, bbox)
        else:
            label_value = 1

        return data, label_value, bbox
Example #53
    def fill(self):
        if self.input_field is None:
            try:
                value = float(self.input)
            except TypeError:
                raise from_none(TypeError(
                    "input has a wrong type %s - must be float" %
                    self.input.__class__))
        elif isinstance(self.input_field, int):
            if self.input_field < 0 or self.input_field >= len(self.input):
                return
            value = self.input[self.input_field]
        else:
            value = self.input.__dict__[self.input_field]
        if type(value) == numpy.ndarray:
            value = value[self.input_offset]
        self.values.append(float(value))
Example #54
File: saver.py Project: zghzdxs/veles
    def load_data(self):
        self._file_ = open(self.file_name, "rb")
        (codec, class_lengths, self.old_max_minibatch_size,
         self.class_chunk_lengths,
         self.minibatch_data_shape, self.minibatch_data_dtype,
         self.minibatch_labels_shape, self.minibatch_labels_dtype,
         self._labels_mapping) = \
            pickle.load(self.file)
        self.class_lengths[:] = class_lengths
        self._has_labels = self.minibatch_labels_shape is not None
        self._reversed_labels_mapping[:] = sorted(self.labels_mapping)
        self.decompress = MinibatchesLoader.CODECS[codec]

        self.chunk_numbers = []
        for ci, cl in enumerate(self.class_lengths):
            mb_chunks = int(numpy.ceil(self.old_max_minibatch_size /
                                       self.class_chunk_lengths[ci]))
            mb_count = int(numpy.ceil(cl / self.old_max_minibatch_size))
            self.chunk_numbers.append(mb_chunks * mb_count)

        class BytesMeasurer(object):
            def __init__(self):
                self.size = 0

            def write(self, data):
                self.size += len(data)

        bm = BytesMeasurer()
        fake_table = [numpy.uint64(i) for i in range(sum(self.chunk_numbers))]
        pickle.dump(fake_table, bm, protocol=best_protocol)
        self.file.seek(-bm.size, SEEK_END)
        try:
            self.offset_table = pickle.load(self.file)
        except pickle.UnpicklingError as e:
            self.error("Failed to read the offset table (table offset was %d)",
                       bm.size)
            raise from_none(e)
        for i, offset in enumerate(self.offset_table):
            self.offset_table[i] = int(offset)
        # Virtual end
        self.offset_table.append(self.file.tell() - bm.size)
        self.debug("Offsets: %s", self.offset_table)
        if self.class_lengths[TRAIN] == 0:
            assert self.normalization_type == "none", \
                "You specified \"%s\" normalization but there are no train " \
                "samples to analyze." % self.normalization_type
            self.normalizer.analyze(self.minibatch_data.mem)