# Example #1
 def __init__(self, workflow, **kwargs):
     """Constructs the unit: output gradient arrays plus required links."""
     super(GradientsCalculator, self).__init__(workflow, **kwargs)
     # Gradient results are written into these arrays.
     self.weights_grad = Array()
     self.hbias_grad = Array()
     self.vbias_grad = Array()
     # Attributes that must be linked from other units before use.
     self.demand("hbias1", "vbias1", "hbias0", "vbias0", "weights0",
                 "weights1")
# Example #2
 def __init__(self, workflow, **kwargs):
     """Constructs the softmax evaluator's result arrays and settings."""
     super(EvaluatorSoftmax, self).__init__(workflow, **kwargs)
     self.compute_confusion_matrix = kwargs.get(
         "compute_confusion_matrix", True)
     self.max_err_output_sum = Array()
     self.n_err = Array()
     self.confusion_matrix = Array()
     self.demand("labels", "max_idx")
# Example #3
 def __init__(self, workflow, **kwargs):
     """Constructs the uniform random generator unit."""
     super(Uniform, self).__init__(workflow, **kwargs)
     self.prng = kwargs.get("prng", get())
     self.num_states = kwargs.get("num_states", 256)
     self.output_bytes = kwargs.get("output_bytes", 0)
     self.states = Array()
     self.output = Array()
     # Single int32 scalar used as a kernel constant argument.
     self.cl_const = numpy.zeros(1, dtype=numpy.int32)
# Example #4
 def __init__(self, workflow, **kwargs):
     """Constructs the MSE evaluator's result arrays and settings."""
     super(EvaluatorMSE, self).__init__(workflow, **kwargs)
     self.root = kwargs.get("root", True)
     self.mse = Array()
     self.metrics = Array()
     self.n_err = Array()
     # Optional links; remain None unless connected by the workflow.
     self.labels = None
     self.class_targets = None
     self.demand("target", "normalizer")
# Example #5
 def __init__(self, workflow, **kwargs):
     """Constructs the base evaluator: error output plus common demands."""
     kwargs.setdefault("view_group", "EVALUATOR")
     super(EvaluatorBase, self).__init__(workflow, **kwargs)
     self.mean = kwargs.get("mean", True)
     self.err_output = Array()
     self._merged_output = Array()
     # Placeholders for kernel constants (left None here).
     self.krn_constants_i_ = None
     self.krn_constants_f_ = None
     self.demand("output", "batch_size")
     # self.testing comes from the base constructor above.
     if self.testing:
         self.demand("class_lengths", "offset")
# Example #6
 def __init__(self, workflow, **kwargs):
     """Constructs the Kohonen forward unit; optional totals accumulation."""
     super(KohonenForward, self).__init__(workflow, **kwargs)
     self.demand("input", "weights")
     self.argmins = None
     self.output = Array()
     self._distances = Array()
     self._chunk_size_ = 0
     self.weights_transposed = False
     # "total" kwarg enables whole-batch accumulation attributes.
     if kwargs.get("total", False):
         self.total = Array()
         self.minibatch_offset = None
         self.minibatch_size = None
         self.batch_size = None
     else:
         self.total = None
# Example #7
 def __init__(self, workflow, **kwargs):
     """Constructs the multi-histogram plotter unit."""
     super(MultiHistogram, self).__init__(workflow, **kwargs)
     self.hist_number = kwargs.get("hist_number", 16)
     self.n_bars = kwargs.get("n_bars", 25)
     self.limit = kwargs.get("limit", 64)
     self.value = Array()
     self.demand("input")
# Example #8
 def __init__(self, workflow, **kwargs):
     """Constructs the 1D cutter unit."""
     super(Cutter1D, self).__init__(workflow, **kwargs)
     # alpha/beta may arrive via kwargs or be linked later;
     # demand() enforces that they are set before initialize().
     self.alpha = kwargs.get("alpha")
     self.beta = kwargs.get("beta")
     self.output = Array()
     self.output_offset = kwargs.get("output_offset", 0)
     self.demand("alpha", "beta", "input")
# Example #9
 def clone(self):
     """Synchronizes this object's attributes from the registered real units.

     For each (unit, attribute list) pair in ``self.reals``:
     immutable values are shared by rebinding; mutable containers are
     deep-copied on first sight and thereafter refreshed in place (so
     existing references to the copies stay valid); ``Array`` attributes
     get a mirror ``Array``, registered in ``self.vectors``, whose memory
     is copied only once it first becomes available on the source.
     """
     for unit, attrs in self.reals.items():
         for attr in attrs:
             value = getattr(unit, attr)
             # Immutable objects can be shared safely - just rebind.
             if self.is_immutable(value):
                 setattr(self, attr, value)
                 continue
             if not isinstance(value, Array):
                 cloned = getattr(self, attr, None)
                 if cloned is None:
                     # First encounter: take a private deep copy.
                     setattr(self, attr, deepcopy(value))
                     continue
                 # Later encounters: update the existing copy in place.
                 if isinstance(value, list):
                     del cloned[:]
                     cloned.extend(value)
                 elif isinstance(value, (dict, set)):
                     cloned.clear()
                     cloned.update(value)
                 elif isinstance(value, Bool):
                     cloned <<= value
                 elif isinstance(value, numpy.ndarray):
                     cloned[:] = value
                 else:
                     # Unknown mutable type: fall back to a fresh deep copy.
                     setattr(self, attr, deepcopy(value))
                 continue
             # Array attribute: maintain a dedicated mirror vector.
             vec = getattr(self, attr, None)
             if vec is None:
                 vec = Array()
                 self.vectors[value] = vec
                 setattr(self, attr, vec)
             else:
                 assert isinstance(vec, Array)
             # Copy memory only when the mirror is still empty but the
             # source Array already holds data.
             if not vec and value:
                 vec.reset(value.mem.copy())
# Example #10
 def __init__(self, workflow, **kwargs):
     """Constructs the mean/dispersion normalizer unit."""
     kwargs.setdefault("view_group", "WORKER")
     super(MeanDispNormalizer, self).__init__(workflow, **kwargs)
     self.output = Array()
     # Placeholders for kernel execution geometry.
     self.local_size = None
     self.global_size = None
     self.demand("input", "mean", "rdisp")
# Example #11
    def __init__(self, workflow, **kwargs):
        """Constructs the base gradient-descent unit.

        Reads every hyper-parameter from ``kwargs`` (with defaults) and
        allocates the arrays that will hold the raw, accumulated and
        momentum-smoothed gradients for weights and bias.
        """
        kwargs["view_group"] = kwargs.get("view_group", "TRAINER")
        super(GradientDescentBase, self).__init__(workflow, **kwargs)
        # NOTE(review): shallow_pickle presumably skips deep-copying the
        # underlying memory on pickling - confirm against Array.
        self.err_input = Array(shallow_pickle=True)
        self.ocl_set_const_args = True
        # Placeholders; must be linked/assigned externally before use.
        self.weights = None
        self.bias = None
        self.demand("input", "err_output")
        # Bias-specific settings default to their weights counterparts.
        self.learning_rate = kwargs.get("learning_rate", 0.01)
        self.learning_rate_bias = kwargs.get("learning_rate_bias",
                                             self.learning_rate)
        self.weights_decay = kwargs.get("weights_decay", 0.00005)
        self.weights_decay_bias = kwargs.get("weights_decay_bias", 0.0)
        self.l1_vs_l2 = kwargs.get("l1_vs_l2", 0)
        self.l1_vs_l2_bias = kwargs.get("l1_vs_l2_bias", self.l1_vs_l2)
        self.gradient_moment = kwargs.get("gradient_moment", 0)
        self.gradient_moment_bias = kwargs.get("gradient_moment_bias",
                                               self.gradient_moment)
        self.weights_transposed = kwargs.get("weights_transposed", False)
        self.need_err_input = kwargs.get("need_err_input", True)
        self.include_bias = kwargs.get("include_bias", True)
        self.factor_ortho = kwargs.get("factor_ortho", 0)
        self.col_sums = Array()  # for orthogonalization

        # Current gradient as it is without applying learning_rate etc.
        self.gradient_weights = Array()
        self.gradient_bias = Array()

        # Gradient with applied learning_rate etc.
        # optionally accumulated from the previous run
        self.accumulate_gradient = kwargs.get("accumulate_gradient", False)

        # When accumulate_gradient set to True:
        # 1. Calculate gd
        # 2. acc = acc_alpha * gd + acc_beta * acc
        # 3. gd = gd_alpha * acc + gd_beta * gd
        # 4. Apply moments to gd
        # 5. weights += gd if apply_gradient set to True
        self.acc_alpha = kwargs.get("acc_alpha", 0.0)
        self.acc_beta = kwargs.get("acc_beta", 0.0)
        self.gd_alpha = kwargs.get("gd_alpha", 0.0)
        self.gd_beta = kwargs.get("gd_beta", 1.0)

        self.accumulated_gradient_weights = Array()
        self.accumulated_gradient_bias = Array()

        # Gradient with accumulated moments
        self.gradient_weights_with_moment = Array()
        self.gradient_bias_with_moment = Array()

        # Sets to True when gradient changes
        self.gradient_changed = False

        # Gradient will be applied to weights immediately just after computing
        self.apply_gradient = kwargs.get("apply_gradient",
                                         not workflow.is_slave)
# Example #12
 def __init__(self, workflow, **kwargs):
     """Constructs the base forward-propagation unit."""
     kwargs.setdefault("view_group", "WORKER")
     super(Forward, self).__init__(workflow, **kwargs)
     stddev = kwargs.get("weights_stddev")
     self.weights_stddev = stddev
     # Bias defaults mirror the weights settings.
     self.bias_stddev = kwargs.get("bias_stddev", stddev)
     self.weights_filling = kwargs.get("weights_filling", "uniform")
     self.bias_filling = kwargs.get("bias_filling", "uniform")
     self.rand = kwargs.get("rand", prng.get())
     self.weights_transposed = kwargs.get("weights_transposed", False)
     self.include_bias = kwargs.get("include_bias", True)
     self.demand("input")
     # NOTE(review): shallow_pickle presumably skips deep-copying the
     # output memory on pickling - confirm against Array.
     self.output = Array(shallow_pickle=True)
     self.weights = Array()
     self.bias = Array()
     self.forward_mode = False
     self.exports = ["weights", "bias", "include_bias",
                     "weights_transposed"]
# Example #13
 def __init__(self, workflow, **kwargs):
     """Constructs the max/min table plotter."""
     kwargs.setdefault("name", "Table")
     super(TableMaxMin, self).__init__(workflow, **kwargs)
     self.row_labels = ["max", "min"]
     self.col_labels = []
     self.y = []
     self.values = Array()
# Example #14
 def __init__(self, workflow, **kwargs):
     """Constructs the Kohonen trainer unit."""
     super(KohonenTrainer, self).__init__(workflow, **kwargs)
     for name in ("_distances", "argmins", "_coords", "weights",
                  "winners"):
         setattr(self, name, Array())
     self.weights_filling = kwargs.get("weights_filling", "uniform")
     self.weights_stddev = kwargs.get("weights_stddev", None)
     self.weights_transposed = kwargs.get("weights_transposed", False)
     self.time = 0
     self._sigma = 0

     # Default decay schedules (used when not supplied via kwargs).
     def default_gradient_decay(t):
         return 0.1 / (1.0 + t * 0.05)

     def default_radius_decay(t):
         return 1.0 / (1.0 + t * 0.05)

     self.gradient_decay = kwargs.get("gradient_decay",
                                      default_gradient_decay)
     self.radius_decay = kwargs.get("radius_decay", default_radius_decay)
     self.demand("input", "shape")
     self._shape = kwargs.get("shape")
# Example #15
 def __init__(self, workflow, **kwargs):
     """Constructs the deconvolution unit."""
     super(Deconv, self).__init__(workflow, **kwargs)
     self.unsafe_padding = kwargs.get("unsafe_padding", False)
     self.hits = Array()
     # Placeholders for the clear-output kernel and its geometry.
     self.krn_clear_output_ = None
     self._global_size = None
     self._local_size = None
     # Bias is not used here; drop the attribute set by the parent
     # constructor.
     del self.bias
     self.demand("n_kernels", "kx", "ky", "padding", "sliding", "input",
                 "weights", "output_shape_source")
# Example #16
 def __init__(self, workflow, **kwargs):
     """Constructs the fixed-range accumulator unit.

     Fix: ``**kwargs`` is now forwarded to the base constructor; the
     original dropped it (unlike every sibling unit), so options such as
     "name" or "view_group" were silently ignored.
     """
     super(FixAccumulator, self).__init__(workflow, **kwargs)
     self.bars = kwargs.get("bars", 200)
     self.type = kwargs.get("type", "relu")
     # Input must be linked externally.
     self.input = None
     self.output = Array()
     self.reset_flag = Bool(True)
     self.n_bars = [0]
     # Fixed histogram value range.
     self.max = 100
     self.min = 0
# Example #17
 def __init__(self, workflow, **kwargs):
     """Constructs the Imagenet loader unit."""
     super(ImagenetLoader, self).__init__(workflow, **kwargs)
     self.mean = Array()
     self.rdisp = Array()
     self.file_samples = ""
     self.sx = kwargs.get("sx", 256)
     self.sy = kwargs.get("sy", 256)
     self.crop_size_sx = kwargs.get("crop_size_sx", 227)
     self.crop_size_sy = kwargs.get("crop_size_sy", 227)
     self.shuffle_limit = kwargs.get("shuffle_limit", 2000000000)
     # Optional dataset file locations; None means "not provided".
     self.original_labels_filename = kwargs.get("original_labels_filename")
     self.count_samples_filename = kwargs.get("count_samples_filename")
     self.matrixes_filename = kwargs.get("matrixes_filename")
     self.samples_filename = kwargs.get("samples_filename")
     self.has_mean_file = False
     self.do_mirror = False
     self.mirror = kwargs.get("mirror", False)
     self.channels = kwargs.get("channels", 3)
# Example #18
 def __init__(self, workflow, **kwargs):
     """Constructs the device benchmark with two random square matrices."""
     super(DeviceBenchmark, self).__init__(workflow, **kwargs)
     self.precision = kwargs.get("dtype", root.common.engine.precision_type)
     self.dtype = opencl_types.dtypes[self.precision]
     self.size = kwargs.get("size", 1500)
     self.repeats = kwargs.get("repeats", 10)
     self._input_A_ = Array()
     self._input_B_ = Array()
     from veles.prng.random_generator import RandomGenerator
     rnd = RandomGenerator(None)
     msize = self.size * self.size

     def genmem():
         # Uniform noise centered at zero, in the benchmark precision.
         return rnd.rand(msize).astype(self.dtype) - 0.5

     self._input_A_.mem = genmem()
     self._input_B_.mem = genmem()
     self.block_size = kwargs.get("block_size")
     self.vector_opt = kwargs.get("vector_opt")
     self.precision_level = kwargs.get("precision_level",
                                       root.common.engine.precision_level)
     self.return_time = kwargs.get("return_time", False)
     self.dry_run_first = kwargs.get("dry_run_first", False)
# Example #19
    def __init__(self, workflow, **kwargs):
        """Constructs the base gradient-descent unit.

        Variant where ``accumulate_gradient`` is an operation code
        (OP_NONE / OP_STORE / OP_ADD / OP_FLUSH) instead of a boolean.
        """
        kwargs["view_group"] = kwargs.get("view_group", "TRAINER")
        super(GradientDescentBase, self).__init__(workflow, **kwargs)
        # NOTE(review): shallow_pickle presumably skips deep-copying the
        # underlying memory on pickling - confirm against Array.
        self.err_input = Array(shallow_pickle=True)
        self.ocl_set_const_args = True
        # Placeholders; must be linked/assigned externally before use.
        self.weights = None
        self.bias = None
        self.demand("input", "err_output")
        # Bias-specific settings default to their weights counterparts.
        self.learning_rate = kwargs.get("learning_rate", 0.01)
        self.learning_rate_bias = kwargs.get("learning_rate_bias",
                                             self.learning_rate)
        self.weights_decay = kwargs.get("weights_decay", 0.00005)
        self.weights_decay_bias = kwargs.get("weights_decay_bias", 0.0)
        self.l1_vs_l2 = kwargs.get("l1_vs_l2", 0)
        self.l1_vs_l2_bias = kwargs.get("l1_vs_l2_bias", self.l1_vs_l2)
        self.gradient_moment = kwargs.get("gradient_moment", 0)
        self.gradient_moment_bias = kwargs.get("gradient_moment_bias",
                                               self.gradient_moment)
        self.weights_transposed = kwargs.get("weights_transposed", False)
        self.need_err_input = kwargs.get("need_err_input", True)
        self.include_bias = kwargs.get("include_bias", True)
        self.factor_ortho = kwargs.get("factor_ortho", 0)
        self.col_sums = Array()  # for orthogonalization

        # Current gradient as it is without applying learning_rate etc.
        self.gradient_weights = Array()
        self.gradient_bias = Array()

        # Gradient with applied learning_rate etc.
        # optionally accumulated from the previous run
        self.accumulated_gradient_weights = Array()
        self.accumulated_gradient_bias = Array()

        # Gradient with accumulated moments
        self.gradient_weights_with_moment = Array()
        self.gradient_bias_with_moment = Array()

        # Sets to True when gradient changes
        self.gradient_changed = False

        # Gradient will be applied to weights immediately just after computing
        self.apply_gradient = kwargs.get("apply_gradient",
                                         not workflow.is_slave)

        # Accumulates gradient from the previous run:
        # OP_NONE: do not allocate array at all
        # OP_STORE: stores gradient with an applied learning_rate etc.
        # OP_ADD: adds current gradient to the array
        # OP_FLUSH: applies accumulated gradient, then resets it to zero
        self.accumulate_gradient = kwargs.get("accumulate_gradient",
                                              self.OP_NONE)
# Example #20
    def __init__(self, workflow, **kwargs):
        """Constructs the Imagenet loader base: geometry, files, class keys."""
        super(ImagenetLoaderBase, self).__init__(workflow, **kwargs)
        self.mean = Array()
        self.rdisp = Array()
        self._file_samples_ = ""
        self.sx = kwargs.get("sx", 256)
        self.sy = kwargs.get("sy", 256)
        self.channels = kwargs.get("channels", 3)
        # Optional dataset file locations (None when not supplied).
        for name in ("original_labels_filename", "count_samples_filename",
                     "matrixes_filename", "samples_filename",
                     "class_keys_path"):
            setattr(self, name, kwargs.get(name))
        self.final_sy = self.sy
        self.final_sx = self.sx
        self._train_different_labels_ = defaultdict(int)
        self.class_keys = None

        # Eagerly load class keys when a path was given.
        if self.class_keys_path is not None:
            with open(self.class_keys_path, "r") as fin:
                self.class_keys = json.load(fin)
            self.info("Class keys was loaded: len %s" % len(self.class_keys))
# Example #21
    def __init__(self, workflow, **kwargs):
        """Constructs the gradient descent unit with optional solvers.

        Solver names accepted in ``kwargs["solvers"]``: "fast",
        "adadelta", "adagrad"; each allocates its own auxiliary arrays.

        Fix: removed the dead ``self.last_minibatch = None`` assignment
        that was unconditionally overwritten at the end of the method.
        """
        # Must exist before super().__init__ (the solvers property setter
        # may run during base construction).
        self._solvers = set()
        super(GradientDescent, self).__init__(workflow, **kwargs)
        s = kwargs.get("solvers", set())
        self.solvers = s

        self.reduce_size = self.REDUCE_SIZE
        # Kernel handles (None until created elsewhere).
        self.krn_err_input_ = None
        self.krn_weights_ = None
        self.krn_err_output_ = None
        self.krn_bias_ = None
        self.krn_compute_col_sums_ = None
        self.krn_err_output_name = None
        self.demand("weights")
        if self.include_bias:
            self.demand("bias")

        self.variant_gradient = kwargs.get("variant_gradient", True)
        self.variant_moment_gradient = (
            kwargs.get("variant_moment_gradient", True))
        if "fast" in self.solvers:
            self.fast = FastGDObjects(kwargs.get("fast_learning_rate", 0.02),
                                      Array(), Array())
        if "adadelta" in self.solvers:
            self.adadelta = AdaDeltaGDObjects(
                kwargs.get("adadelta_momentum", 0.9),
                Array(), Array(),
                Array(), Array(),
                kwargs.get("adadelta_adom", 0.3),
                kwargs.get("adadelta_epsilon", 1e-8))
            self.adadelta_adom = self.adadelta.adom

        if "adagrad" in self.solvers:
            self.adagrad = AdaGradGDObjects(
                kwargs.get("adagrad_epsilon", 1e-8),
                Array(), Array())

        self.last_minibatch = kwargs.get("last_minibatch", False)
# Example #22
 def __init__(self, workflow, **kwargs):
     """Constructs the image loader: augmentation and preprocessing opts."""
     super(ImageLoader, self).__init__(workflow, **kwargs)
     self.color_space = kwargs.get("color_space", "RGB")
     self._source_dtype = numpy.float32
     self._original_shape = tuple()
     self.class_keys = [[], [], []]
     self.verify_interface(IImageLoader)
     self.path_to_mean = kwargs.get("path_to_mean")
     self.add_sobel = kwargs.get("add_sobel", False)
     # Accepted values: True, False or "random".
     self.mirror = kwargs.get("mirror", False)
     self.scale = kwargs.get("scale", 1.0)
     self.scale_maintain_aspect_ratio = kwargs.get(
         "scale_maintain_aspect_ratio", True)
     # Rotation angles are in radians.
     self.rotations = kwargs.get("rotations", (0.0, ))
     self.crop = kwargs.get("crop")
     self.crop_number = kwargs.get("crop_number", 1)
     self._background = None
     self.background_image = kwargs.get("background_image")
     self.background_color = kwargs.get("background_color",
                                        (0xff, 0x14, 0x93))
     self.smart_crop = kwargs.get("smart_crop", True)
     self.minibatch_label_values = Array()
# Example #23
 def __init__(self, workflow, **kwargs):
     """Constructs the input joiner unit."""
     super(InputJoiner, self).__init__(workflow, **kwargs)
     self._num_inputs = 0
     self.inputs = kwargs.get("inputs")
     self.output = Array()
# Example #24
 def __init__(self, workflow, **kwargs):
     """Constructs the dropout forward unit."""
     super(DropoutForward, self).__init__(workflow, **kwargs)
     self.rand = random_generator.get()
     self.mask = Array()  # dropout mask
     self.states = Array()
# Example #25
 def __init__(self, workflow, **kwargs):
     """Creates err_x/err_y arrays; requires a linked err_output."""
     super(GDSummator, self).__init__(workflow, **kwargs)
     self.demand("err_output")
     self.err_x = Array()
     self.err_y = Array()
# Example #26
 def __init__(self, workflow, **kwargs):
     """Creates the output array; requires linked "x" and "y"."""
     super(Summator, self).__init__(workflow, **kwargs)
     self.demand("x", "y")
     self.output = Array()
# Example #27
 def __init__(self, workflow, **kwargs):
     """Creates batch weight/bias arrays; needs v, h and batch_size."""
     super(BatchWeights, self).__init__(workflow, **kwargs)
     self.weights_batch = Array()
     self.hbias_batch = Array()
     self.vbias_batch = Array()
     self.demand("v", "h", "batch_size")
# Example #28
 def __init__(self, workflow, **kwargs):
     """Creates the destination array; requires a linked input."""
     super(MemCpy, self).__init__(workflow, **kwargs)
     self.demand("input")
     self.output = Array()
# Example #29
 def __init__(self, workflow, **kwargs):
     """Constructs the binarization unit (RNG plus output array)."""
     super(Binarization, self).__init__(workflow, **kwargs)
     self.rand = kwargs.get("rand", prng.get())
     self.output = Array()
     self.demand("input", "batch_size")
# Example #30
    def __init__(self, workflow, **kwargs):
        """Creates the zeroing mask; requires linked weights."""
        super(ZeroFiller, self).__init__(workflow, **kwargs)
        self.demand("weights")
        self.grouping = kwargs.get("grouping", 1)
        self.mask = Array()