Example #1
    def link_local(self):
        req_param(self, ['nifm', 'ifmshape', 'fshape'])

        opt_param(self, ['ofmlocs', 'links'])
        opt_param(self, ['deltasbuf', 'outputbuf'])

        opt_param(self, ['nofm'], self.nifm)
        opt_param(self, ['pooling'], False)
        opt_param(self, ['stride'], 1)
        opt_param(self, ['pad'], 0)

        assert len(self.ifmshape) == len(self.fshape)
        ofmshape = []
        for dim in range(len(self.ifmshape)):
            assert self.ifmshape[dim] >= self.fshape[dim]
            num = self.ifmshape[dim] - self.fshape[dim] + 2 * self.pad
            ofmshape.extend([num // self.stride + 1])
        self.ofmshape = tuple(ofmshape)
        self.negpad = -self.pad
        self.ifmsize = np.prod(self.ifmshape)
        self.ofmsize = np.prod(self.ofmshape)
        self.fpsize = np.prod(self.fshape)
        self.fsize = self.nifm * self.fpsize
        self.nout = self.nofm * self.ofmsize
        logger.debug('name=%s, nifm=%d, ifmshape=%s, ofmshape=%s',
                     self.name, self.nifm, self.ifmshape, self.ofmshape)
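All of these snippets lean on neon's req_param and opt_param helpers. Judging from how the examples on this page call them, they presumably behave like the minimal sketch below; the function bodies and the exact error message are assumptions for illustration, not neon's confirmed implementation:

    # Minimal sketch (assumed behavior): req_param checks that required
    # attributes were supplied (typically via **kwargs absorbed into
    # self.__dict__), while opt_param fills in a shared default for any
    # listed attribute that is still missing.
    def req_param(obj, paramlist):
        for param in paramlist:
            if not hasattr(obj, param):
                raise ValueError("required parameter %s missing in %s" %
                                 (param, obj.__class__.__name__))

    def opt_param(obj, paramlist, default_value=None):
        for param in paramlist:
            if not hasattr(obj, param):
                setattr(obj, param, default_value)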
Example #2
File: layer.py Project: zz119/neon
    def __init__(self, **kwargs):
        self.initialized = False
        self.__dict__.update(kwargs)

        req_param(self, ['name'])

        opt_param(self, [
            'pre_act_dtype', 'output_dtype', 'deltas_dtype', 'weight_dtype',
            'updates_dtype'
        ], np.float32)
        opt_param(self, ['prev_layer'])
        opt_param(self, ['activation'], Linear())

        opt_param(self, ['is_local', 'is_data', 'is_cost'], False)
        opt_param(self, ['skip_act', 'has_params'], False)
        opt_param(self, ['prev_names'], [])

        opt_param(self, ['backend_type'], 'np.float32')
        if self.backend_type == 'np.float16':
            logger.info("Setting layer dtype to float16")
            for some_type in [
                    'pre_act_dtype', 'output_dtype', 'deltas_dtype',
                    'weight_dtype', 'updates_dtype'
            ]:
                setattr(self, some_type, np.float16)
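A hypothetical way to exercise this constructor, assuming the class shown is the base Layer and that nothing beyond the shown keyword arguments is needed (the names and values below are illustrative only):

    # Hypothetical usage sketch: 'name' is required by req_param, the dtype
    # fields default to np.float32, and backend_type='np.float16' switches
    # them all to np.float16.
    layer = Layer(name='fc1', backend_type='np.float16')
    assert layer.weight_dtype is np.float16
    # Omitting name=... would make req_param(self, ['name']) raise.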
Example #3
File: rnn.py Project: Eynaliyev/neon
 def __init__(self, **kwargs):
     self.accumulate = True
     # Reusing deltas not supported for RNNs yet
     self.reuse_deltas = False
     super(RNN, self).__init__(**kwargs)
     req_param(self, ['unrolls'])
     self.rec_layer = self.layers[1]
Example #4
    def __init__(self, **kwargs):

        opt_param(self, ['preprocess_done'], False)
        opt_param(self, ['dotransforms', 'square_crop'], True)
        opt_param(self, ['mean_norm', 'unit_norm'], False)
        opt_param(self, ['shuffle_macro'], False)
        opt_param(self, ['tdims'], 0)
        opt_param(self, ['label_list'], ['l_id'])
        opt_param(self, ['num_channels'], 3)

        opt_param(self, ['num_workers'], 6)
        opt_param(self, ['backend_type'], 'np.float32')

        self.__dict__.update(kwargs)

        if self.backend_type in ['float16', 'np.float16', 'numpy.float16']:
            self.backend_type = np.float16
        elif self.backend_type in ['float32', 'np.float32', 'numpy.float32']:
            self.backend_type = np.float32
        else:
            raise ValueError('Datatype not understood')
        logger.warning("Imageset initialized with dtype %s", self.backend_type)
        req_param(self, ['cropped_image_size', 'output_image_size',
                         'imageset', 'save_dir', 'repo_path', 'macro_size'])

        opt_param(self, ['image_dir'], os.path.join(self.repo_path,
                                                    self.imageset))

        self.rgb = True if self.num_channels == 3 else False
        self.norm_factor = 128. if self.mean_norm else 256.
        self.img_dtype = np.int8
Example #5
File: imageset.py Project: neuroidss/neon
    def __init__(self, **kwargs):

        opt_param(self, ['preprocess_done'], False)
        opt_param(self, ['dotransforms', 'square_crop'], False)
        opt_param(self, ['mean_norm', 'unit_norm'], False)
        opt_param(self, ['shuffle_macro'], False)
        opt_param(self, ['tdims'], 0)
        opt_param(self, ['label_list'], ['l_id'])
        opt_param(self, ['num_channels'], 3)

        opt_param(self, ['num_workers'], 6)
        opt_param(self, ['backend_type'], 'np.float32')

        self.__dict__.update(kwargs)

        if self.backend_type in ['float16', 'np.float16', 'numpy.float16']:
            self.backend_type = np.float16
        elif self.backend_type in ['float32', 'np.float32', 'numpy.float32']:
            self.backend_type = np.float32
        else:
            raise ValueError('Datatype not understood')
        logger.warning("Imageset initialized with dtype %s", self.backend_type)
        req_param(self, ['cropped_image_size', 'output_image_size',
                         'imageset', 'save_dir', 'repo_path', 'macro_size'])

        opt_param(self, ['image_dir'], os.path.join(self.repo_path,
                                                    self.imageset))

        self.rgb = True if self.num_channels == 3 else False
        self.norm_factor = 128. if self.mean_norm else 256.
        self.img_dtype = np.int8
Example #6
File: layer.py Project: JesseLivezey/neon
 def initialize(self, kwargs):
     super(DataLayer, self).initialize(kwargs)
     self.reset_counter()
     if self.is_local is True:
         req_param(self, ['nofm', 'ofmshape'])
         self.nout = self.nofm * np.prod(self.ofmshape)
     else:
         req_param(self, ['nout'])
Example #7
 def __init__(self, **kwargs):
     self.initialized = False
     self.__dict__.update(kwargs)
     req_param(self, ['dataset', 'model'])
     opt_param(self, ['backend'])
     opt_param(self, ['live'], False)
     if self.backend is not None:
         self.initialize(self.backend)
Example #8
 def initialize(self, kwargs):
     super(DataLayer, self).initialize(kwargs)
     self.reset_counter()
     if self.is_local is True:
         req_param(self, ['nofm', 'ofmshape'])
         self.nout = self.nofm * np.prod(self.ofmshape)
     else:
         req_param(self, ['nout'])
Example #9
File: mlp.py Project: ivanajw/neon
 def __init__(self, **kwargs):
     self.initialized = False
     self.__dict__.update(kwargs)
     req_param(self, ['layers', 'batch_size'])
     opt_param(self, ['step_print'], -1)
     opt_param(self, ['accumulate'], False)
     opt_param(self, ['reuse_deltas'], True)
     opt_param(self, ['timing_plots'], False)
Example #10
File: recurrent.py Project: xiaoyunwu/neon
 def initialize(self, kwargs):
     super(RecurrentCostLayer, self).initialize(kwargs)
     req_param(self, ['cost', 'ref_layer'])
     opt_param(self, ['ref_label'], 'targets')
     self.targets = None
     self.cost.olayer = self.prev_layer
     self.cost.initialize(kwargs)
     self.deltas = self.cost.get_deltabuf()
Example #11
File: fit.py Project: neuroidss/neon
 def __init__(self, **kwargs):
     self.initialized = False
     self.__dict__.update(kwargs)
     req_param(self, ['dataset', 'model'])
     opt_param(self, ['backend'])
     opt_param(self, ['live'], False)
     if self.backend is not None:
         self.initialize(self.backend)
Example #12
 def __init__(self, **kwargs):
     self.initialized = False
     self.__dict__.update(kwargs)
     req_param(self, ['layers', 'batch_size'])
     opt_param(self, ['step_print'], -1)
     opt_param(self, ['accumulate'], False)
     opt_param(self, ['reuse_deltas'], True)
     opt_param(self, ['timing_plots'], False)
     opt_param(self, ['serialize_schedule'])
Example #13
File: mlp.py Project: ruguevara/neon
 def __init__(self, **kwargs):
     self.initialized = False
     self.__dict__.update(kwargs)
     req_param(self, ["layers", "batch_size"])
     opt_param(self, ["step_print"], -1)
     opt_param(self, ["accumulate"], False)
     opt_param(self, ["reuse_deltas"], True)
     opt_param(self, ["timing_plots"], False)
     opt_param(self, ["serialize_schedule"])
Example #14
File: recurrent.py Project: xiaoyunwu/neon
    def initialize(self, kwargs):
        req_param(self, ['nout', 'nin', 'unrolls', 'activation'])
        super(RecurrentOutputLayer, self).initialize(kwargs)
        self.weight_shape = (self.nout, self.nin)
        self.bias_shape = (self.nout, 1)

        opt_param(self, ['delta_shape'], (self.nin, self.batch_size))  # moved
        self.allocate_output_bufs()
        self.allocate_param_bufs()
Example #15
    def initialize(self, kwargs):
        if self.initialized:
            return
        self.__dict__.update(kwargs)
        req_param(self, ['backend', 'batch_size'])

        self.output = None
        self.deltas = None
        self.initialized = True
Example #16
 def initialize(self, kwargs):
     self.__dict__.update(kwargs)
     req_param(self, ['backend', 'batch_size'])
     self.output = None
     self.deltas = None
     if self.is_local:
         self.nofm = self.end_idx
         self.end_idx = np.prod(self.ifmshape) * self.end_idx
     self.nout = self.end_idx
     self.allocate_output_bufs()
Example #17
File: recurrent.py Project: xiaoyunwu/neon
    def initialize(self, kwargs):
        req_param(self, ['weight_init_rec'])
        self.weight_rec_shape = (self.nout, self.nout)
        super(RecurrentLSTMLayer, self).initialize(kwargs)
        self.weight_shape = (self.nout, self.nin)
        self.bias_shape = (self.nout, 1)

        opt_param(self, ['delta_shape'], (self.nout, self.batch_size))
        self.allocate_output_bufs()
        self.allocate_param_bufs()
Example #18
 def initialize(self, kwargs):
     req_param(self, ["ksize", "alpha", "beta"])
     self.alpha = self.alpha * 1.0 / self.ksize
     super(CrossMapResponseNormLayer, self).initialize(kwargs)
     self.nout = self.nin
     self.ofmshape, self.nofm = self.ifmshape, self.nifm
     self.allocate_output_bufs()
     self.tempbuf = None
     if isinstance(self.backend, CPU) and not self.prev_layer.is_data:
         self.tempbuf = self.backend.empty((1, self.ifmshape[-2], self.ifmshape[-1], self.batch_size))
Example #19
File: pooling.py Project: zz119/neon
    def initialize(self, kwargs):
        super(CrossMapPoolingLayer, self).initialize(kwargs)
        req_param(self, ['nofm'])

        self.initialize_local()
        self.allocate_output_bufs()
        self.allocate_param_bufs()
        opt_param(self, ['updatebuf'], None)
        if isinstance(self.backend, CPU):
            self.updatebuf = self.backend.empty((1, 1))
Example #20
File: pooling.py Project: AI-Cdrone/neon
    def initialize(self, kwargs):
        super(CrossMapPoolingLayer, self).initialize(kwargs)
        req_param(self, ['nofm'])

        self.initialize_local()
        self.allocate_output_bufs()
        self.allocate_param_bufs()
        opt_param(self, ['updatebuf'], None)
        if isinstance(self.backend, CPU):
            self.updatebuf = self.backend.empty((1, 1))
Example #21
File: balance.py Project: zz119/neon
 def __init__(self, **kwargs):
     self.accumulate = True
     super(Balance, self).__init__(**kwargs)
     req_param(self, ['classlayers', 'stylelayers'])
     self.cost_layer = self.classlayers[-1]
     self.out_layer = self.layers[-2]
     self.class_layer = self.classlayers[-2]
     self.branch_layer = self.stylelayers[-2]
     self.pathways = [self.layers, self.classlayers, self.stylelayers]
     self.kwargs = kwargs
Example #22
File: batch_norm.py Project: Tao2015/neon
    def initialize(self, kwargs):
        """
        Initialize the Batch Normalization transform. This function will be
        called from WeightLayer.initialize with a reference to the layer.

        Arguments:
            _eps (numeric, optional): value used for numerical stability when
                                      normalizing by variance
            _iscale (numeric, optional): explicitly set an affine scale value
                                         to be used in inference instead of
                                         calculated scale from training
            _ishift (numeric, optional): explicitly set an affine shift value
                                         to be used in inference instead of
                                         calculated shift from training
        """
        self.__dict__.update(kwargs)
        self.dtype = self.layer.weight_dtype
        self.bigtype = np.float32 if self.dtype is np.float16 else self.dtype
        opt_param(self, ['_iscale', '_ishift'])
        opt_param(self, ['_eps'], 1e-6)
        req_param(self, ['layer'])

        self.backend = self.layer.backend
        self.is_local = self.layer.is_local
        self.batch_size = self.layer.batch_size
        if self.is_local:
            self.in1d = (self.layer.nofm, 1)
            self.ofmsize = self.layer.ofmsize
            self.orig_shape = (self.layer.nofm * self.ofmsize, self.batch_size)
            self.in_shape = (self.layer.nofm, self.ofmsize * self.batch_size)
        else:
            self.in_shape = (self.layer.nout, self.batch_size)
            self.in1d = (self.layer.nout, 1)

        self.train_mode = True
        logger.info("BatchNormalization set to train mode")
        self.nbatches = 0

        self._xhat = self.backend.zeros(self.in_shape, dtype=self.dtype)

        self._mean = self.backend.zeros(self.in1d, dtype=self.bigtype)
        self._vars = self.backend.zeros(self.in1d, dtype=self.bigtype)

        # Global mean and var to be used during inference
        self._gmean = self.backend.zeros(self.in1d, dtype=self.bigtype)
        self._gvars = self.backend.zeros(self.in1d, dtype=self.bigtype)

        # learned params and their update buffers
        self._beta = self.backend.zeros(self.in1d, dtype=self.bigtype)
        self._gamma = self.backend.ones(self.in1d, dtype=self.bigtype)
        self.layer.params.extend([self._beta, self._gamma])

        self._beta_updates = self.backend.zeros(self.in1d, dtype=self.bigtype)
        self._gamma_updates = self.backend.zeros(self.in1d, dtype=self.bigtype)
        self.layer.updates.extend([self._beta_updates, self._gamma_updates])
Example #23
File: batch_norm.py Project: zz119/neon
    def initialize(self, kwargs):
        """
        Initialize the Batch Normalization transform. This function will be
        called from WeightLayer.initialize with a reference to the layer.

        Arguments:
            _eps (numeric, optional): value used for numerical stability when
                                      normalizing by variance
            _iscale (numeric, optional): explicitly set an affine scale value
                                         to be used in inference instead of
                                         calculated scale from training
            _ishift (numeric, optional): explicitly set an affine shift value
                                         to be used in inference instead of
                                         calculated shift from training
        """
        self.__dict__.update(kwargs)
        self.dtype = self.layer.weight_dtype
        self.bigtype = np.float32 if self.dtype is np.float16 else self.dtype
        opt_param(self, ['_iscale', '_ishift'])
        opt_param(self, ['_eps'], 1e-6)
        req_param(self, ['layer'])

        self.backend = self.layer.backend
        self.is_local = self.layer.is_local
        self.batch_size = self.layer.batch_size
        if self.is_local:
            self.in1d = (self.layer.nofm, 1)
            self.ofmsize = self.layer.ofmsize
            self.orig_shape = (self.layer.nofm * self.ofmsize, self.batch_size)
            self.in_shape = (self.layer.nofm, self.ofmsize * self.batch_size)
        else:
            self.in_shape = (self.layer.nout, self.batch_size)
            self.in1d = (self.layer.nout, 1)

        self.train_mode = True
        logger.info("BatchNormalization set to train mode")
        self.nbatches = 0

        self._xhat = self.backend.zeros(self.in_shape, dtype=self.dtype)

        self._mean = self.backend.zeros(self.in1d, dtype=self.bigtype)
        self._vars = self.backend.zeros(self.in1d, dtype=self.bigtype)

        # Global mean and var to be used during inference
        self._gmean = self.backend.zeros(self.in1d, dtype=self.bigtype)
        self._gvars = self.backend.zeros(self.in1d, dtype=self.bigtype)

        # learned params and their update buffers
        self._beta = self.backend.zeros(self.in1d, dtype=self.bigtype)
        self._gamma = self.backend.ones(self.in1d, dtype=self.bigtype)
        self.layer.params.extend([self._beta, self._gamma])

        self._beta_updates = self.backend.zeros(self.in1d, dtype=self.bigtype)
        self._gamma_updates = self.backend.zeros(self.in1d, dtype=self.bigtype)
        self.layer.updates.extend([self._beta_updates, self._gamma_updates])
Example #24
 def initialize(self, kwargs):
     req_param(self, ['ksize', 'alpha', 'beta'])
     self.alpha = self.alpha * 1.0 / self.ksize
     super(CrossMapResponseNormLayer, self).initialize(kwargs)
     self.nout = self.nin
     self.ofmshape, self.nofm = self.ifmshape, self.nifm
     self.allocate_output_bufs()
     self.tempbuf = None
     if isinstance(self.backend, CPU) and not self.prev_layer.is_data:
         self.tempbuf = self.backend.empty(
             (1, self.ifmshape[-2], self.ifmshape[-1], self.batch_size))
Example #25
 def initialize(self, kwargs):
     super(CostLayer, self).initialize(kwargs)
     req_param(self, ['cost'])
     opt_param(self, ['ref_label'], 'targets')
     opt_param(self, ['raw_label'], False)
     opt_param(self, ['category_label'], 'l_id')
     self.reference = None
     self.cost.olayer = self.prev_layer
     kwargs['raw_label'] = self.raw_label
     self.cost.initialize(kwargs)
     self.deltas = self.cost.get_deltabuf()
Example #26
File: cost.py Project: zz119/neon
 def initialize(self, kwargs):
     self.__dict__.update(kwargs)
     opt_param(self, ['backend'], self.olayer.backend)
     opt_param(self, ['batch_size'], self.olayer.batch_size)
     opt_param(self, ['olayer_data'], 'output')
     req_param(self.olayer, [self.olayer_data])
     # if not hasattr(self.olayer, self.olayer_data):
     #     raise ValueError("Layer %s does not have buffer %s" %
     #                      (self.olayer.name, self.olayer_data))
     # else:
     self.set_outputbuf(getattr(self.olayer, self.olayer_data))
Example #27
File: layer.py Project: jjcorreao/neon
    def initialize(self, kwargs):
        super(WeightLayer, self).initialize(kwargs)
        req_param(self, ['weight_init', 'lrule_init', 'nin', 'nout'])
        opt_param(self, ['accumulate'], False)
        opt_param(self, ['batch_norm'], False)

        self.weight_init.initialize(self.backend)
        self.params = []
        self.updates = []

        if self.batch_norm:
            self.bn = BatchNorm()
            kwargs['layer'] = self
            self.bn.initialize(kwargs)
Example #28
File: layer.py Project: zz119/neon
    def initialize(self, kwargs):
        super(WeightLayer, self).initialize(kwargs)
        req_param(self, ['weight_init', 'lrule_init', 'nin', 'nout'])
        opt_param(self, ['accumulate'], False)
        opt_param(self, ['batch_norm'], False)

        self.weight_init.initialize(self.backend)
        self.params = []
        self.updates = []

        if self.batch_norm:
            self.bn = BatchNorm()
            kwargs['layer'] = self
            self.bn.initialize(kwargs)
Example #29
    def initialize(self, kwargs):
        super(WeightLayer, self).initialize(kwargs)
        req_param(self, ['nin', 'nout'])
        opt_param(self, ['weight_init'], default_weight_init())
        opt_param(self, ['lrule_init'], default_lrule_init())
        opt_param(self, ['accumulate'], False)
        opt_param(self, ['batch_norm'], False)
        opt_param(self, ['mempool'])  # Used for parallel mode

        self.weight_init.initialize(self.backend)
        self.params = []
        self.updates = []

        if self.batch_norm:
            self.bn = BatchNorm()
            kwargs['layer'] = self
            self.bn.initialize(kwargs)
Example #30
    def load(self, backend=None, experiment=None):
        '''
        Imageset only supports nervanagpu based backends
        '''
        if not hasattr(self.backend, 'ng'):
            raise DeprecationWarning("Only nervanagpu-based backends "
                                     "supported.  For using cudanet backend, "
                                     "revert to neon 0.8.2 ")

        bdir = os.path.expanduser(self.save_dir)
        cachefile = os.path.join(bdir, 'dataset_cache.pkl')
        if not os.path.exists(cachefile):
            logger.error("Batch dir cache not found in %s:", cachefile)
            response = raw_input("Press Y to create, otherwise exit: ")
            if response == 'Y':
                from neon.util.batch_writer import (BatchWriter,
                                                    BatchWriterImagenet)

                if self.imageset.startswith('I1K'):
                    self.bw = BatchWriterImagenet(**self.__dict__)
                else:
                    self.bw = BatchWriter(**self.__dict__)
                self.bw.run()
                logger.error('Done writing batches - please rerun to train.')
            else:
                logger.error('Exiting...')
            sys.exit()
        cstats = deserialize(cachefile, verbose=False)
        if cstats['macro_size'] != self.macro_size:
            raise NotImplementedError("Cached macro size %d different from "
                                      "specified %d, delete save_dir %s "
                                      "and try again.",
                                      cstats['macro_size'],
                                      self.macro_size,
                                      self.save_dir)
        # Set the max indexes of batches for each from the cache file
        self.maxval = cstats['nval'] + cstats['val_start'] - 1
        self.maxtrain = cstats['ntrain'] + cstats['train_start'] - 1

        # Make sure only those properties not by yaml are updated
        cstats.update(self.__dict__)
        self.__dict__.update(cstats)
        # Should also put (in addition to nclass), number of train/val images
        req_param(self, ['ntrain', 'nval', 'train_start', 'val_start',
                         'train_mean', 'val_mean', 'labels_dict'])
Example #31
File: imageset.py Project: neuroidss/neon
    def load(self, backend=None, experiment=None):
        '''
        Imageset only supports nervanagpu based backends
        '''
        if not hasattr(self.backend, 'ng'):
            raise DeprecationWarning("Only nervanagpu-based backends "
                                     "supported.  For using cudanet backend, "
                                     "revert to neon 0.8.2 ")

        bdir = os.path.expanduser(self.save_dir)
        cachefile = os.path.join(bdir, 'dataset_cache.pkl')
        if not os.path.exists(cachefile):
            logger.error("Batch dir cache not found in %s:", cachefile)
            response = raw_input("Press Y to create, otherwise exit: ")
            if response == 'Y':
                from neon.util.batch_writer import (BatchWriter,
                                                    BatchWriterImagenet)

                if self.imageset.startswith('I1K'):
                    self.bw = BatchWriterImagenet(**self.__dict__)
                else:
                    self.bw = BatchWriter(**self.__dict__)
                self.bw.run()
                logger.error('Done writing batches - please rerun to train.')
            else:
                logger.error('Exiting...')
            sys.exit()
        cstats = deserialize(cachefile, verbose=False)
        if cstats['macro_size'] != self.macro_size:
            raise NotImplementedError("Cached macro size %d different from "
                                      "specified %d, delete save_dir %s "
                                      "and try again.",
                                      cstats['macro_size'],
                                      self.macro_size,
                                      self.save_dir)
        # Set the max indexes of batches for each from the cache file
        self.maxval = cstats['nval'] + cstats['val_start'] - 1
        self.maxtrain = cstats['ntrain'] + cstats['train_start'] - 1

        # Make sure only those properties not by yaml are updated
        cstats.update(self.__dict__)
        self.__dict__.update(cstats)
        # Should also put (in addition to nclass), number of train/val images
        req_param(self, ['ntrain', 'nval', 'train_start', 'val_start',
                         'train_mean', 'val_mean', 'labels_dict'])
Example #32
File: balance.py Project: zz119/neon
    def __init__(self, **kwargs):
        self.accumulate = True
        super(BalanceMP, self).__init__(**kwargs)
        req_param(self, ['costpaths'])
        # Append the prefix to the costpaths
        for ckey in self.costpaths.keys():
            self.costpaths[ckey] = self.prefixlayers + self.costpaths[ckey]

        self.cost_layer = self.costpaths['subject'][-1]
        self.branch_layer = self.costpaths['z'][-2]
        self.out_layer = self.layers[-2]

        softmaxlabels = filter(lambda x: x != 'z', self.costpaths.keys())
        self.softlayers = [self.costpaths[ck][-2] for ck in softmaxlabels]

        self.pathways = [self.layers, self.costpaths['z']]
        self.pathways += [self.costpaths[ck] for ck in softmaxlabels]
        self.path_skip_act = [False, False] + [True for ck in softmaxlabels]
        self.kwargs = kwargs
Example #33
File: layer.py Project: jjcorreao/neon
    def __init__(self, **kwargs):
        self.initialized = False
        self.__dict__.update(kwargs)

        req_param(self, ['name'])

        opt_param(self, ['pre_act_dtype', 'output_dtype', 'deltas_dtype',
                         'weight_dtype', 'updates_dtype'], np.float32)
        opt_param(self, ['prev_layer'])
        opt_param(self, ['activation'], Linear())

        opt_param(self, ['is_local', 'is_data', 'is_cost'], False)
        opt_param(self, ['is_random'], False)

        opt_param(self, ['skip_act', 'has_params'], False)
        opt_param(self, ['prev_names'], [])

        opt_param(self, ['backend_type'], 'np.float32')
        if self.backend_type == 'np.float16':
            logger.info("Setting layer dtype to float16")
            for some_type in ['pre_act_dtype', 'output_dtype', 'deltas_dtype',
                              'weight_dtype', 'updates_dtype']:
                setattr(self, some_type, np.float16)
Example #34
File: pooling.py Project: AI-Cdrone/neon
 def __init__(self, **kwargs):
     self.is_local = True
     super(PoolingLayer, self).__init__(**kwargs)
     req_param(self, ['op'])
     opt_param(self, ['maxout'], 1)
Example #35
 def __init__(self, **kwargs):
     super(SubConvLayer, self).__init__(**kwargs)
     req_param(self, ['endidx'])
Example #36
 def __init__(self, **kwargs):
     super(SubConvLayer, self).__init__(**kwargs)
     req_param(self, ['endidx'])
Example #37
    def initialize(self, kwargs):
        """
        Initialize the Batch Normalization transform. This function will be
        called from WeightLayer.initialize with a reference to the layer.

        Arguments:
            _eps (numeric, optional): value used for numerical stability when
                                      normalizing by variance
            _iscale (numeric, optional): explicitly set an affine scale value
                                         to be used in inference instead of
                                         calculated scale from training
            _ishift (numeric, optional): explicitly set an affine shift value
                                         to be used in inference instead of
                                         calculated shift from training
        """
        self.__dict__.update(kwargs)
        self.dtype = self.layer.weight_dtype
        opt_param(self, ['_eps'], 1e-6)
        opt_param(self, ['_rho'], 0.99)

        req_param(self, ['layer'])

        self.backend = self.layer.backend
        self.is_local = self.layer.is_local
        self.batch_size = self.layer.batch_size
        if self.is_local:
            self.in1d = (self.layer.nofm, 1)
            self.ofmsize = self.layer.ofmsize
            self.in_shape = (self.layer.nofm, self.ofmsize * self.batch_size)
            make_zbuf = self.backend.allocate_fragment
        else:
            self.in_shape = (self.layer.nout, self.batch_size)
            self.in1d = (self.layer.nout, 1)
            make_zbuf = self.backend.empty

        self.train_mode = True
        logger.info("BatchNormalization set to train mode")

        self._xhat = make_zbuf(self.in_shape, dtype=self.dtype,
                               persist_values=False)

        self._mean = self.backend.zeros(self.in1d, dtype=self.dtype,
                                        persist_values=False)
        self._vars = self.backend.zeros(self.in1d, dtype=self.dtype,
                                        persist_values=False)

        # learned params and their update buffers
        self._beta = self.backend.zeros(self.in1d, dtype=self.dtype,
                                        persist_values=False)
        self._gamma = self.backend.ones(self.in1d, dtype=self.dtype,
                                        persist_values=False)

        self.layer.params.extend([self._beta, self._gamma])

        if self.backend.is_dist:
            self._beta.ptype = self._gamma.ptype = 'replica'
            self._mean.ptype = self._vars.ptype = 'replica'

        # Global mean and var to be used during inference
        self._gmean = self.backend.zeros_like(self._mean, dtype=self.dtype,
                                              persist_values=True)
        self._gvars = self.backend.zeros_like(self._vars, dtype=self.dtype,
                                              persist_values=True)

        self._beta_updates = self.backend.zeros_like(self._beta,
                                                     dtype=self.dtype)
        self._gamma_updates = self.backend.zeros_like(self._gamma,
                                                      dtype=self.dtype)
        self.layer.updates.extend([self._beta_updates, self._gamma_updates])
Example #38
File: pooling.py Project: zz119/neon
 def __init__(self, **kwargs):
     self.is_local = True
     super(PoolingLayer, self).__init__(**kwargs)
     req_param(self, ['op'])
     opt_param(self, ['maxout'], 1)
Example #39
 def initialize(self, kwargs):
     super(ActivationLayer, self).initialize(kwargs)
     req_param(self, ['activation'])
     self.nout = self.nin
     self.allocate_output_bufs()
Example #40
 def initialize(self, kwargs):
     super(CompositeLayer, self).initialize(kwargs)
     req_param(self, ['sublayers'])
     self.has_params = True
     for subl in self.sublayers:
         subl.initialize(kwargs)
Example #41
File: compositional.py Project: zz119/neon
 def initialize(self, kwargs):
     super(CompositeLayer, self).initialize(kwargs)
     req_param(self, ['sublayers'])
     self.has_params = True
     for subl in self.sublayers:
         subl.initialize(kwargs)
Example #42
 def __init__(self, **kwargs):
     super(SliceLayer, self).__init__(**kwargs)
     req_param(self, ['end_idx'])