def get_fixed_var_descr(self, model, X, Y):
    """
    Builds a FixedVarDescr holding the shared drop masks for the input
    (and, in the supervised case, the labels), plus the multiplicative
    dropout masks used by the inference procedure. A Theano function in
    on_load_batch resamples all of them each time a batch is loaded.
    """
    assert Y is not None
    batch_size = model.batch_size

    drop_mask_X = sharedX(model.get_input_space().get_origin_batch(batch_size))
    drop_mask_X.name = 'drop_mask'

    X_space = model.get_input_space()

    updates = OrderedDict()
    rval = FixedVarDescr()
    inputs = [X, Y]

    if not self.supervised:
        update_X = self.mask_gen(X, X_space=X_space)
    else:
        drop_mask_Y = sharedX(np.ones(batch_size))
        drop_mask_Y.name = 'drop_mask_Y'
        update_X, update_Y = self.mask_gen(X, Y, X_space)
        updates[drop_mask_Y] = update_Y
        rval.fixed_vars['drop_mask_Y'] = drop_mask_Y
    if self.mask_gen.sync_channels:
        n = update_X.ndim
        assert n == drop_mask_X.ndim - 1
        update_X.name = 'raw_update_X'
        zeros_like_X = T.zeros_like(X)
        zeros_like_X.name = 'zeros_like_X'
        update_X = zeros_like_X + update_X.dimshuffle(0, 1, 2, 'x')
        update_X.name = 'update_X'
    updates[drop_mask_X] = update_X
    rval.fixed_vars['drop_mask'] = drop_mask_X

    if hasattr(model.inference_procedure, 'V_dropout'):
        include_prob = model.inference_procedure.include_prob
        include_prob_V = model.inference_procedure.include_prob_V
        include_prob_Y = model.inference_procedure.include_prob_Y

        theano_rng = MRG_RandomStreams(2012 + 11 + 20)
        for elem in flatten([model.inference_procedure.V_dropout]):
            updates[elem] = theano_rng.binomial(p=include_prob_V,
                                                size=elem.shape,
                                                dtype=elem.dtype,
                                                n=1) / include_prob_V
        if "Softmax" in str(type(model.hidden_layers[-1])):
            hid = model.inference_procedure.H_dropout[:-1]
            y = model.inference_procedure.H_dropout[-1]
            updates[y] = theano_rng.binomial(p=include_prob_Y, size=y.shape,
                                             dtype=y.dtype,
                                             n=1) / include_prob_Y
        else:
            hid = model.inference_procedure.H_dropout
        for elem in flatten(hid):
            updates[elem] = theano_rng.binomial(p=include_prob,
                                                size=elem.shape,
                                                dtype=elem.dtype,
                                                n=1) / include_prob

    rval.on_load_batch = [utils.function(inputs, updates=updates)]

    return rval

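# --- Illustration only, not part of the library ---
# A minimal stand-in for the `mask_gen` collaborator that the method above
# relies on, inferred from its call sites: it is called either as
# mask_gen(X, X_space=...) or mask_gen(X, Y, X_space), and it exposes a
# `sync_channels` attribute. `DummyMaskGen` and its parameters are
# hypothetical names used only for this sketch.
from theano.sandbox.rng_mrg import MRG_RandomStreams


class DummyMaskGen(object):

    sync_channels = False

    def __init__(self, drop_prob=0.5, seed=42):
        self.drop_prob = drop_prob
        self.theano_rng = MRG_RandomStreams(seed)

    def __call__(self, X, Y=None, X_space=None):
        # Mask entries are 1 (dropped) with probability drop_prob.
        # X_space is accepted only to match the call sites above.
        drop_mask = self.theano_rng.binomial(p=self.drop_prob, size=X.shape,
                                             dtype=X.dtype, n=1)
        if Y is None:
            return drop_mask
        # One mask entry per example for the labels, matching the
        # (batch_size,) shape of drop_mask_Y above.
        drop_mask_Y = self.theano_rng.binomial(p=self.drop_prob,
                                               size=(Y.shape[0],),
                                               dtype=Y.dtype, n=1)
        return drop_mask, drop_mask_Y
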
def get_fixed_var_descr(self, model, X, Y=None):
    rval = FixedVarDescr()
    rval.fixed_vars = {'sup_aux_var': sup_counter}
    rval.on_load_batch = [function([X, Y],
                                   updates=[(sup_counter, sup_counter + 1)])]
    return rval

def get_fixed_var_descr(self, model, X, Y, **kwargs):
    rval = FixedVarDescr()
    rval.fixed_vars = {'unsup_aux_var': unsup_counter}
    # This cost ignores the labels, so Y is replaced with a fresh dummy
    # matrix; the compiled function still accepts a Y argument but never
    # uses it.
    Y = T.matrix()
    theano_func = function([X, Y],
                           updates=[(unsup_counter, unsup_counter + 1)])
    rval.on_load_batch = [theano_func]
    return rval

def get_fixed_var_descr(self, model, X, Y, **kwargs):
    rval = FixedVarDescr()
    rval.fixed_vars = {'unsup_aux_var': unsup_counter}
    Y = T.matrix()
    theano_func = function([X, Y],
                           updates=[(unsup_counter, unsup_counter + 1)])
    rval.on_load_batch = [theano_func]
    return rval

def get_fixed_var_descr(self, model, data):
    data_specs = self.get_data_specs(model)
    data_specs[0].validate(data)
    rval = FixedVarDescr()
    rval.fixed_vars = {'sup_aux_var': sup_counter}
    theano_func = function([], updates=[(sup_counter, sup_counter + 1)])

    def on_load(data):
        theano_func()

    rval.on_load_batch = [on_load]
    return rval

def get_fixed_var_descr(self, model, data, **kwargs):
    data_specs = self.get_data_specs(model)
    data_specs[0].validate(data)
    rval = FixedVarDescr()
    rval.fixed_vars = {'unsup_aux_var': unsup_counter}

    # The input to `function` should be a flat, non-redundant tuple
    mapping = DataSpecsMapping(data_specs)
    data_tuple = mapping.flatten(data, return_tuple=True)
    theano_func = function([], updates=[(unsup_counter, unsup_counter + 1)])

    def on_load(batch, mapping=mapping, theano_func=theano_func):
        return theano_func()

    rval.on_load_batch = [on_load]
    return rval

def get_fixed_var_descr(self, model, data, **kwargs):
    data_specs = self.get_data_specs(model)
    data_specs[0].validate(data)
    rval = FixedVarDescr()
    rval.fixed_vars = {'unsup_aux_var': unsup_counter}

    # The input to `function` should be a flat, non-redundant tuple
    mapping = DataSpecsMapping(data_specs)
    data_tuple = mapping.flatten(data, return_tuple=True)
    theano_func = function([], updates=[(unsup_counter, unsup_counter + 1)])

    def on_load(batch, mapping=mapping, theano_func=theano_func):
        return theano_func()

    rval.on_load_batch = [on_load]
    return rval

def get_fixed_var_descr(self, model, data):
    data_specs = self.get_data_specs(model)
    data_specs[0].validate(data)
    rval = FixedVarDescr()
    rval.fixed_vars = {'sup_aux_var': sup_counter}
    rval.data_specs = data_specs

    # data has to be flattened into a tuple before being passed
    # to `function`.
    mapping = DataSpecsMapping(data_specs)
    flat_data = mapping.flatten(data, return_tuple=True)
    theano_func = function(flat_data,
                           updates=[(sup_counter, sup_counter + 1)])

    # The on_load_batch function will take numerical data formatted
    # as rval.data_specs, so we have to flatten it inside the
    # returned function too.
    # Using default arguments binds the variables used in the lambda
    # to the values they have when the lambda is defined.
    on_load = (lambda batch, mapping=mapping, theano_func=theano_func:
               theano_func(*mapping.flatten(batch, return_tuple=True)))
    rval.on_load_batch = [on_load]
    return rval

def get_fixed_var_descr(self, model, data, **kwargs):
    data_specs = self.get_data_specs(model)
    data_specs[0].validate(data)
    rval = FixedVarDescr()
    rval.fixed_vars = {'unsup_aux_var': unsup_counter}
    rval.data_specs = data_specs

    # The input to `function` should be a flat, non-redundant tuple
    mapping = DataSpecsMapping(data_specs)
    data_tuple = mapping.flatten(data, return_tuple=True)
    theano_func = function(data_tuple,
                           updates=[(unsup_counter, unsup_counter + 1)])

    # The on_load_batch function will take numerical data formatted
    # as rval.data_specs, so we have to flatten it inside the
    # returned function too.
    # Using default arguments binds the variables used in the lambda
    # to the values they have when the lambda is defined.
    on_load = (lambda batch, mapping=mapping, theano_func=theano_func:
               theano_func(*mapping.flatten(batch, return_tuple=True)))
    rval.on_load_batch = [on_load]
    return rval

def get_fixed_var_descr(self, model, data, **kwargs):
    data_specs = self.get_data_specs(model)
    data_specs[0].validate(data)
    rval = FixedVarDescr()
    rval.fixed_vars = {'unsup_aux_var': unsup_counter}
    rval.data_specs = data_specs

    # The input to `function` should be a flat, non-redundant tuple
    mapping = DataSpecsMapping(data_specs)
    data_tuple = mapping.flatten(data, return_tuple=True)
    theano_func = function(data_tuple,
                           updates=[(unsup_counter, unsup_counter + 1)])

    # The on_load_batch function will take numerical data formatted
    # as rval.data_specs, so we have to flatten it inside the
    # returned function too.
    # Using default arguments binds the variables used in the lambda
    # to the values they have when the lambda is defined.
    on_load = (lambda batch, mapping=mapping, theano_func=theano_func:
               theano_func(*mapping.flatten(batch, return_tuple=True)))
    rval.on_load_batch = [on_load]
    return rval

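# --- Illustration only ---
# A minimal sketch of the calling convention the data_specs-based
# descriptors above assume: get_fixed_var_descr is called once with
# symbolic data, and each callback in on_load_batch is then called with
# the numerical batch every time a batch is loaded. `cost`, `model`,
# `sym_data` and `batch_iterator` are hypothetical placeholders, not names
# from the snippets.
def refresh_fixed_vars(cost, model, sym_data, batch_iterator):
    descr = cost.get_fixed_var_descr(model, sym_data)
    for batch in batch_iterator:
        # Callbacks receive the nested numerical batch; the mapping-based
        # versions above flatten it themselves before calling Theano.
        for callback in descr.on_load_batch:
            callback(batch)
    return descr.fixed_vars
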
def get_fixed_var_descr(self, model, X, Y=None):
    rval = FixedVarDescr()
    rval.fixed_vars = {'sup_aux_var': sup_counter}
    rval.on_load_batch = [function([X, Y],
                                   updates=[(sup_counter, sup_counter + 1)])]
    return rval

def get_fixed_var_descr(self, model, data):
    """
    Builds a FixedVarDescr holding the shared drop masks for the input
    (and, in the supervised case, the labels), plus the multiplicative
    dropout masks used by the inference procedure. A Theano function in
    on_load_batch resamples all of them each time a batch is loaded.
    """
    X, Y = data
    assert Y is not None
    batch_size = model.batch_size

    drop_mask_X = sharedX(model.get_input_space().get_origin_batch(batch_size))
    drop_mask_X.name = "drop_mask"

    X_space = model.get_input_space()

    updates = OrderedDict()
    rval = FixedVarDescr()
    inputs = [X, Y]

    if not self.supervised:
        update_X = self.mask_gen(X, X_space=X_space)
    else:
        drop_mask_Y = sharedX(np.ones(batch_size))
        drop_mask_Y.name = "drop_mask_Y"
        update_X, update_Y = self.mask_gen(X, Y, X_space)
        updates[drop_mask_Y] = update_Y
        rval.fixed_vars["drop_mask_Y"] = drop_mask_Y
    if self.mask_gen.sync_channels:
        # The generated mask has one dimension fewer than the input;
        # broadcast it across the channel axis.
        n = update_X.ndim
        assert n == drop_mask_X.ndim - 1
        update_X.name = "raw_update_X"
        zeros_like_X = T.zeros_like(X)
        zeros_like_X.name = "zeros_like_X"
        update_X = zeros_like_X + update_X.dimshuffle(0, 1, 2, "x")
        update_X.name = "update_X"
    updates[drop_mask_X] = update_X
    rval.fixed_vars["drop_mask"] = drop_mask_X

    if hasattr(model.inference_procedure, "V_dropout"):
        include_prob = model.inference_procedure.include_prob
        include_prob_V = model.inference_procedure.include_prob_V
        include_prob_Y = model.inference_procedure.include_prob_Y

        theano_rng = make_theano_rng(None, 2012 + 10 + 20,
                                     which_method="binomial")
        # Resample the visible-unit dropout masks, rescaled by the
        # inclusion probability.
        for elem in flatten([model.inference_procedure.V_dropout]):
            updates[elem] = theano_rng.binomial(p=include_prob_V,
                                                size=elem.shape,
                                                dtype=elem.dtype,
                                                n=1) / include_prob_V
        if "Softmax" in str(type(model.hidden_layers[-1])):
            # The last hidden layer is a softmax: its dropout mask uses
            # the label inclusion probability.
            hid = model.inference_procedure.H_dropout[:-1]
            y = model.inference_procedure.H_dropout[-1]
            updates[y] = theano_rng.binomial(p=include_prob_Y, size=y.shape,
                                             dtype=y.dtype,
                                             n=1) / include_prob_Y
        else:
            hid = model.inference_procedure.H_dropout
        for elem in flatten(hid):
            updates[elem] = theano_rng.binomial(p=include_prob,
                                                size=elem.shape,
                                                dtype=elem.dtype,
                                                n=1) / include_prob

    rval.on_load_batch = [utils.function(inputs, updates=updates)]

    return rval
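
# --- Illustration only ---
# One plausible consumer of the fixed variables built above (an assumption
# for illustration, not the library's actual interface): a cost expression
# receives the shared masks under the names they were registered with in
# fixed_vars and uses them to split the input into observed and
# to-be-inpainted parts. `cost_from_masked_input` is a hypothetical helper.
def expr(self, model, data, drop_mask=None, drop_mask_Y=None, **kwargs):
    X, Y = data
    # drop_mask entries equal to 1 mark values hidden from the model; the
    # on_load_batch function above resamples the mask for every batch.
    X_observed = X * (1 - drop_mask)
    return self.cost_from_masked_input(model, X_observed, Y,
                                       drop_mask, drop_mask_Y)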