def test_multi_optimizer(backend_default):
    opt_gdm = GradientDescentMomentum(learning_rate=0.001, momentum_coef=0.9, wdecay=0.005)
    opt_ada = Adadelta()
    opt_adam = Adam()
    opt_rms = RMSProp()
    opt_rms_1 = RMSProp(gradient_clip_value=5)
    init_one = Gaussian(scale=0.01)

    l1 = Conv((11, 11, 64), strides=4, padding=3,
              init=init_one, bias=Constant(0), activation=Rectlin())
    l2 = Affine(nout=4096, init=init_one, bias=Constant(1), activation=Rectlin())
    l3 = LSTM(output_size=1000, init=init_one, activation=Logistic(), gate_activation=Tanh())
    l4 = GRU(output_size=100, init=init_one, activation=Logistic(), gate_activation=Tanh())
    layers = [l1, l2, l3, l4]
    layer_list = []
    for layer in layers:
        if isinstance(layer, list):
            layer_list.extend(layer)
        else:
            layer_list.append(layer)

    opt = MultiOptimizer({'default': opt_gdm,
                          'Bias': opt_ada,
                          'Convolution': opt_adam,
                          'Linear': opt_rms,
                          'LSTM': opt_rms_1,
                          'GRU': opt_rms_1})
    map_list = opt._map_optimizers(layer_list)

    assert map_list[opt_adam][0].__class__.__name__ == 'Convolution'
    assert map_list[opt_ada][0].__class__.__name__ == 'Bias'
    assert map_list[opt_rms][0].__class__.__name__ == 'Linear'
    assert map_list[opt_gdm][0].__class__.__name__ == 'Activation'
    assert map_list[opt_rms_1][0].__class__.__name__ == 'LSTM'
    assert map_list[opt_rms_1][1].__class__.__name__ == 'GRU'
def __init__(self, num_actions, args):
    # remember parameters
    self.num_actions = num_actions
    self.batch_size = args.batch_size
    self.discount_rate = args.discount_rate
    self.history_length = args.history_length
    self.screen_dim = (args.screen_height, args.screen_width)
    self.clip_error = args.clip_error
    self.min_reward = args.min_reward
    self.max_reward = args.max_reward
    self.batch_norm = args.batch_norm

    # create Neon backend
    self.be = gen_backend(backend=args.backend,
                          batch_size=args.batch_size,
                          rng_seed=args.random_seed,
                          device_id=args.device_id,
                          datatype=np.dtype(args.datatype).type,
                          stochastic_round=args.stochastic_round)

    # prepare tensors once and reuse them
    self.input_shape = (self.history_length,) + self.screen_dim + (self.batch_size,)
    self.input = self.be.empty(self.input_shape)
    self.input.lshape = self.input_shape  # HACK: needed for convolutional networks
    self.targets = self.be.empty((self.num_actions, self.batch_size))

    # create model
    layers = self._createLayers(num_actions)
    self.model = Model(layers=layers)
    self.cost = GeneralizedCost(costfunc=SumSquared())
    # Bug fix
    for l in self.model.layers.layers:
        l.parallelism = 'Disabled'
    self.model.initialize(self.input_shape[:-1], self.cost)
    if args.optimizer == 'rmsprop':
        self.optimizer = RMSProp(learning_rate=args.learning_rate,
                                 decay_rate=args.decay_rate,
                                 stochastic_round=args.stochastic_round)
    elif args.optimizer == 'adam':
        self.optimizer = Adam(learning_rate=args.learning_rate,
                              stochastic_round=args.stochastic_round)
    elif args.optimizer == 'adadelta':
        self.optimizer = Adadelta(decay=args.decay_rate,
                                  stochastic_round=args.stochastic_round)
    else:
        assert False, "Unknown optimizer"

    # create target model
    self.train_iterations = 0
    if args.target_steps:
        self.target_model = Model(layers=self._createLayers(num_actions))
        # Bug fix
        for l in self.target_model.layers.layers:
            l.parallelism = 'Disabled'
        self.target_model.initialize(self.input_shape[:-1])
        self.save_weights_prefix = args.save_weights_prefix
    else:
        self.target_model = self.model

    self.callback = None
def test_multi_optimizer(backend_default_mkl):
    """
    A test for MultiOptimizer.
    """
    opt_gdm = GradientDescentMomentum(learning_rate=0.001, momentum_coef=0.9, wdecay=0.005)
    opt_ada = Adadelta()
    opt_adam = Adam()
    opt_rms = RMSProp()
    opt_rms_1 = RMSProp(gradient_clip_value=5)
    init_one = Gaussian(scale=0.01)

    l1 = Conv((11, 11, 64), strides=4, padding=3,
              init=init_one, bias=Constant(0), activation=Rectlin())
    l2 = Affine(nout=4096, init=init_one, bias=Constant(1), activation=Rectlin())
    l3 = LSTM(output_size=1000, init=init_one, activation=Logistic(), gate_activation=Tanh())
    l4 = GRU(output_size=100, init=init_one, activation=Logistic(), gate_activation=Tanh())
    layers = [l1, l2, l3, l4]
    layer_list = []
    for layer in layers:
        if isinstance(layer, list):
            layer_list.extend(layer)
        else:
            layer_list.append(layer)
    for l in layer_list:
        l.configure(in_obj=(16, 28, 28))
        l.allocate()

    # separate layer_list into two: the last two recurrent layers and the rest
    layer_list1, layer_list2 = layer_list[:-2], layer_list[-2:]

    opt = MultiOptimizer({'default': opt_gdm,
                          'Bias': opt_ada,
                          'Convolution': opt_adam,
                          'Convolution_bias': opt_adam,
                          'Linear': opt_rms,
                          'LSTM': opt_rms_1,
                          'GRU': opt_rms_1})

    layers_to_optimize1 = [l for l in layer_list1 if isinstance(l, ParameterLayer)]
    layers_to_optimize2 = [l for l in layer_list2 if isinstance(l, ParameterLayer)]

    opt.optimize(layers_to_optimize1, 0)
    assert opt.map_list[opt_adam][0].__class__.__name__ == 'Convolution_bias'
    assert opt.map_list[opt_rms][0].__class__.__name__ == 'Linear'

    opt.optimize(layers_to_optimize2, 0)
    assert opt.map_list[opt_rms_1][0].__class__.__name__ == 'LSTM'
    assert opt.map_list[opt_rms_1][1].__class__.__name__ == 'GRU'
def test_rmsprop(backend):
    rms = RMSProp()
    param = np.random.rand(200, 128)
    param2 = copy.deepcopy(param)
    grad = 0.01 * np.random.rand(200, 128)
    states = [0.01 * np.random.rand(200, 128)]
    state = states[0]
    decay = rms.decay_rate
    denom = np.sqrt(decay * state + np.square(grad) * (1.0 - decay) + rms.epsilon) + rms.epsilon
    param2[:] -= grad * rms.learning_rate / denom
    param_list = [((wrap(param), wrap(grad)), [wrap(states[0])])]
    compare_tensors(rms, param_list, param2, tol=1e-7)
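# A minimal NumPy sketch, for reference, of the single-step RMSProp update that
# test_rmsprop above verifies. The helper name `rmsprop_step` and its default
# hyper-parameter values are illustrative assumptions, not part of neon; the
# arithmetic simply restates the expected value computed in the test: decay the
# running average of squared gradients, then step by grad / (sqrt(avg + eps) + eps).
import numpy as np

def rmsprop_step(param, grad, state, lr=0.002, decay=0.95, eps=1e-6):
    """Return the updated (param, state) after one RMSProp step."""
    state = decay * state + (1.0 - decay) * np.square(grad)      # running avg of grad^2
    param = param - lr * grad / (np.sqrt(state + eps) + eps)     # scaled gradient step
    return param, state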
def __init__(self, state_size, num_steers, num_speeds, args):
    # remember parameters
    self.state_size = state_size
    self.num_steers = num_steers
    self.num_speeds = num_speeds
    self.num_actions = num_steers + num_speeds
    self.num_layers = args.hidden_layers
    self.hidden_nodes = args.hidden_nodes
    self.batch_size = args.batch_size
    self.discount_rate = args.discount_rate
    self.clip_error = args.clip_error

    # create Neon backend
    self.be = gen_backend(backend=args.backend,
                          batch_size=args.batch_size,
                          rng_seed=args.random_seed,
                          device_id=args.device_id,
                          datatype=np.dtype(args.datatype).type,
                          stochastic_round=args.stochastic_round)

    # prepare tensors once and reuse them
    self.input_shape = (self.state_size, self.batch_size)
    self.input = self.be.empty(self.input_shape)
    self.targets = self.be.empty((self.num_actions, self.batch_size))

    # create model
    self.model = Model(layers=self._createLayers())
    self.cost = GeneralizedCost(costfunc=SumSquared())
    self.model.initialize(self.input_shape[:-1], self.cost)
    if args.optimizer == 'rmsprop':
        self.optimizer = RMSProp(learning_rate=args.learning_rate,
                                 decay_rate=args.decay_rate,
                                 stochastic_round=args.stochastic_round)
    elif args.optimizer == 'adam':
        self.optimizer = Adam(learning_rate=args.learning_rate,
                              stochastic_round=args.stochastic_round)
    elif args.optimizer == 'adadelta':
        self.optimizer = Adadelta(decay=args.decay_rate,
                                  stochastic_round=args.stochastic_round)
    else:
        assert False, "Unknown optimizer"

    # create target model
    self.target_steps = args.target_steps
    self.train_iterations = 0
    if self.target_steps:
        self.target_model = Model(layers=self._createLayers())
        self.target_model.initialize(self.input_shape[:-1])
        self.save_weights_prefix = args.save_weights_prefix
    else:
        self.target_model = self.model
def test_rmsprop_wclip(backend_default):
    wclip = 0.5
    rms = RMSProp(param_clip_value=wclip)
    param = np.random.rand(200, 128)
    param2 = copy.deepcopy(param)
    grad = 0.01 * np.random.rand(200, 128)
    grad2 = grad / 128.
    states = [0.01 * np.random.rand(200, 128)]
    state = states[0]
    decay = rms.decay_rate
    denom = np.sqrt(decay * state + np.square(grad2) * (1.0 - decay) + rms.epsilon) + rms.epsilon
    param2[:] -= grad2 * float(rms.learning_rate) / denom
    np.clip(param2, -wclip, wclip, param2)
    param_list = [((wrap(param), wrap(grad)), [wrap(states[0])])]
    compare_tensors(rms, param_list, param2, tol=1e-7)
def __init__(self, num_actions, batch_size=32, discount_rate=0.99, history_length=4,
             cols=64, rows=64, clip_error=1, min_reward=-1, max_reward=1, batch_norm=False):
    self.num_actions = num_actions
    self.batch_size = batch_size
    self.discount_rate = discount_rate
    self.history_length = history_length
    self.board_dim = (cols, rows)
    self.clip_error = clip_error
    self.min_reward = min_reward
    self.max_reward = max_reward
    self.batch_norm = batch_norm

    self.be = gen_backend(backend='gpu',
                          batch_size=self.batch_size,
                          datatype=np.dtype('float32').type)

    self.input_shape = (self.history_length,) + self.board_dim + (self.batch_size,)
    self.input = self.be.empty(self.input_shape)
    self.input.lshape = self.input_shape  # hack from simple_dqn "needed for convolutional networks"
    self.targets = self.be.empty((self.num_actions, self.batch_size))

    layers = self._createLayers(self.num_actions)
    self.model = Model(layers=layers)
    self.cost = GeneralizedCost(costfunc=SumSquared())
    # for l in self.model.layers.layers:
    #     l.parallelism = 'Disabled'
    self.model.initialize(self.input_shape[:-1], cost=self.cost)

    self.optimizer = RMSProp(learning_rate=0.002, decay_rate=0.95, stochastic_round=True)

    self.train_iterations = 0
    self.target_model = Model(layers=self._createLayers(num_actions))
    # for l in self.target_model.layers.layers:
    #     l.parallelism = 'Disabled'
    self.target_model.initialize(self.input_shape[:-1])

    self.callback = None
def _set_optimizer(self):
    """ Initializes the selected optimization algorithm. """
    _logger.debug("Optimizer = %s" % str(self.args.optimizer))
    if self.args.optimizer == 'rmsprop':
        self.optimizer = RMSProp(learning_rate=self.args.learning_rate,
                                 decay_rate=self.args.decay_rate,
                                 stochastic_round=self.args.stochastic_round)
    elif self.args.optimizer == 'adam':
        self.optimizer = Adam(learning_rate=self.args.learning_rate,
                              stochastic_round=self.args.stochastic_round)
    elif self.args.optimizer == 'adadelta':
        self.optimizer = Adadelta(decay=self.args.decay_rate,
                                  stochastic_round=self.args.stochastic_round)
    else:
        assert False, "Unknown optimizer"
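# Illustrative only: _set_optimizer above reads its hyper-parameters from
# self.args, so whatever constructs the object needs an args namespace exposing
# at least the fields below. The attribute names match those referenced in the
# method; the values are made-up placeholders, not ones prescribed by the source.
from types import SimpleNamespace

args = SimpleNamespace(optimizer='rmsprop',       # 'rmsprop', 'adam', or 'adadelta'
                       learning_rate=0.00025,
                       decay_rate=0.95,
                       stochastic_round=False)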
def __init__(self, num_actions, args):
    # remember parameters
    self.num_actions = num_actions
    self.batch_size = args.batch_size
    self.discount_rate = args.discount_rate
    self.history_length = args.history_length
    self.screen_dim = (args.screen_height, args.screen_width)
    self.clip_error = args.clip_error

    # create Neon backend
    self.be = gen_backend(backend=args.backend,
                          batch_size=args.batch_size,
                          rng_seed=args.random_seed,
                          device_id=args.device_id,
                          default_dtype=np.dtype(args.datatype).type,
                          stochastic_round=args.stochastic_round)

    # prepare tensors once and reuse them
    self.input_shape = (self.history_length,) + self.screen_dim + (self.batch_size,)
    self.tensor = self.be.empty(self.input_shape)
    self.tensor.lshape = self.input_shape  # needed for convolutional networks
    self.targets = self.be.empty((self.num_actions, self.batch_size))

    # create model
    layers = self.createLayers(num_actions)
    self.model = Model(layers=layers)
    self.cost = GeneralizedCost(costfunc=SumSquared())
    self.model.initialize(self.tensor.shape[:-1], self.cost)
    self.optimizer = RMSProp(learning_rate=args.learning_rate,
                             decay_rate=args.rmsprop_decay_rate,
                             stochastic_round=args.stochastic_round)

    # create target model
    self.target_steps = args.target_steps
    self.train_iterations = 0
    if self.target_steps:
        self.target_model = Model(layers=self.createLayers(num_actions))
        self.target_model.initialize(self.tensor.shape[:-1])
        self.save_weights_path = args.save_weights_path
    else:
        self.target_model = self.model

    self.callback = None
def __init__(self, args, max_action_no, batch_dimension):
    self.args = args
    self.train_batch_size = args.train_batch_size
    self.discount_factor = args.discount_factor
    self.use_gpu_replay_mem = args.use_gpu_replay_mem

    self.be = gen_backend(backend='gpu', batch_size=self.train_batch_size)
    self.input_shape = (batch_dimension[1], batch_dimension[2],
                        batch_dimension[3], batch_dimension[0])
    self.input = self.be.empty(self.input_shape)
    self.input.lshape = self.input_shape  # HACK: needed for convolutional networks
    self.targets = self.be.empty((max_action_no, self.train_batch_size))

    if self.use_gpu_replay_mem:
        self.history_buffer = self.be.zeros(batch_dimension, dtype=np.uint8)
        self.input_uint8 = self.be.empty(self.input_shape, dtype=np.uint8)
    else:
        self.history_buffer = np.zeros(batch_dimension, dtype=np.float32)

    self.train_net = Model(self.create_layers(max_action_no))
    self.cost = GeneralizedCost(costfunc=SumSquared())
    # Bug fix
    for l in self.train_net.layers.layers:
        l.parallelism = 'Disabled'
    self.train_net.initialize(self.input_shape[:-1], self.cost)

    self.target_net = Model(self.create_layers(max_action_no))
    # Bug fix
    for l in self.target_net.layers.layers:
        l.parallelism = 'Disabled'
    self.target_net.initialize(self.input_shape[:-1])

    if self.args.optimizer == 'Adam':    # Adam
        self.optimizer = Adam(beta_1=args.rms_decay,
                              beta_2=args.rms_decay,
                              learning_rate=args.learning_rate)
    else:                                # Neon RMSProp
        self.optimizer = RMSProp(decay_rate=args.rms_decay,
                                 learning_rate=args.learning_rate)

    self.max_action_no = max_action_no
    self.running = True
                 gen_model=args.gmodel,
                 cost_type='wasserstein',
                 im_size=64, n_chan=3, n_noise=100,
                 n_gen_ftr=64, n_dis_ftr=64,
                 depth=4, n_extra_layers=4,
                 batch_norm=True,
                 dis_iters=5,
                 wgan_param_clamp=0.01,
                 wgan_train_sched=True)

# setup optimizer
optimizer = RMSProp(learning_rate=5e-5, decay_rate=0.99, epsilon=1e-8)

# setup data provider
train = make_loader(args.manifest['train'], args.manifest_root, model.be,
                    args.subset_pct, random_seed)

# configure callbacks
callbacks = Callbacks(model, **args.callback_args)

fdir = ensure_dirs_exist(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'results/'))
fname = os.path.splitext(os.path.basename(__file__))[0] + \
    '_[' + datetime.now().strftime('%Y-%m-%d-%H-%M-%S') + ']'
im_args = dict(filename=os.path.join(fdir, fname), hw=64,
               num_samples=args.batch_size, nchan=3,
train_set = Text(time_steps, train_path)
valid_set = Text(time_steps, valid_path, vocab=train_set.vocab)

# weight initialization
init = Uniform(low=-0.08, high=0.08)

# model initialization
layers = [
    LSTM(hidden_size, init, activation=Logistic(), gate_activation=Tanh()),
    Affine(len(train_set.vocab), init, bias=init, activation=Softmax())
]
model = Model(layers=layers)

cost = GeneralizedCost(costfunc=CrossEntropyMulti(usebits=True))

optimizer = RMSProp(gradient_clip_value=gradient_clip_value,
                    stochastic_round=args.rounding)

# configure callbacks
callbacks = Callbacks(model, eval_set=valid_set, **args.callback_args)

# fit and validate
model.fit(train_set, optimizer=optimizer,
          num_epochs=args.epochs, cost=cost, callbacks=callbacks)


def sample(prob):
    """
    Sample index from probability distribution
# weight initialization
init_norm = Gaussian(loc=0.0, scale=0.01)

# initialize model
layers = []
layers.append(Affine(nout=100, init=init_norm, bias=Constant(0), activation=Rectlin()))
layers.append(Affine(nout=10, init=init_norm, bias=Constant(0),
                     activation=Logistic(shortcut=True), name='special_linear'))

cost = GeneralizedCost(costfunc=CrossEntropyBinary())
mlp = Model(layers=layers)

# fit and validate
optimizer_one = GradientDescentMomentum(learning_rate=0.1, momentum_coef=0.9)
optimizer_two = RMSProp()

# all bias layers and the last linear layer will use optimizer_two.
# all other layers will use optimizer_one.
opt = MultiOptimizer({'default': optimizer_one,
                      'Bias': optimizer_two,
                      'special_linear': optimizer_two})

# configure callbacks
callbacks = Callbacks(mlp, eval_set=valid_set, **args.callback_args)

mlp.fit(train_set, optimizer=opt, num_epochs=args.epochs, cost=cost, callbacks=callbacks)
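# Illustrative sketch only, not neon source: the MultiOptimizer mapping above
# appears to be resolved per layer roughly as below, keyed first by the layer's
# instance name (e.g. 'special_linear'), then by its class name (e.g. 'Bias'),
# and falling back to 'default'. The exact precedence is an assumption here,
# and the helper name pick_optimizer is hypothetical.
def pick_optimizer(optimizer_map, layer):
    if layer.name in optimizer_map:
        return optimizer_map[layer.name]
    if layer.__class__.__name__ in optimizer_map:
        return optimizer_map[layer.__class__.__name__]
    return optimizer_map['default']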
        Affine(train_set.nfeatures, init, bias=init, activation=Identity())
    ]
else:
    layers = [
        LSTM(hidden, init, activation=Logistic(), gate_activation=Tanh(), reset_cells=True),
        RecurrentLast(),
        Affine(train_set.nfeatures, init, bias=init, activation=Identity())
    ]

model = Model(layers=layers)
cost = GeneralizedCost(MeanSquared())
optimizer = RMSProp(stochastic_round=args.rounding)

callbacks = Callbacks(model, eval_set=valid_set, **args.callback_args)

# fit model
model.fit(train_set, optimizer=optimizer, num_epochs=args.epochs,
          cost=cost, callbacks=callbacks)

# =======visualize how the model does on validation set==============
# run the trained model on train and valid dataset and see how the outputs match
train_output = model.get_outputs(train_set).reshape(-1, train_set.nfeatures)
valid_output = model.get_outputs(valid_set).reshape(
    MergeMultistream(layers=[image_path, sent_path], merge="recurrent"),
    Dropout(keep=0.5),
    LSTM(hidden_size, init, activation=Logistic(), gate_activation=Tanh(), reset_cells=True),
    Affine(train_set.vocab_size, init, bias=init2, activation=Softmax())
]

cost = GeneralizedCostMask(costfunc=CrossEntropyMulti(usebits=True))

# configure callbacks
checkpoint_model_path = "~/image_caption2.pickle"
if args.callback_args['save_path'] is None:
    args.callback_args['save_path'] = checkpoint_model_path

if args.callback_args['serialize'] is None:
    args.callback_args['serialize'] = 1

model = Model(layers=layers)
callbacks = Callbacks(model, train_set, **args.callback_args)

opt = RMSProp(decay_rate=0.997, learning_rate=0.0005, epsilon=1e-8, gradient_clip_value=1)

# train model
model.fit(train_set, optimizer=opt, num_epochs=num_epochs, cost=cost, callbacks=callbacks)

# load model (if exited) and evaluate bleu score on test set
model.load_params(checkpoint_model_path)
test_set = ImageCaptionTest(path=data_path)
sents, targets = test_set.predict(model)
test_set.bleu_score(sents, targets)
cost = GeneralizedCostMask(costfunc=CrossEntropyMulti(usebits=True))

checkpoint_model_path = "~/image_caption2.pickle"
checkpoint_schedule = range(num_epochs)

model = Model(layers=layers)
callbacks = Callbacks(model, train_set, output_file=args.output_file,
                      progress_bar=args.progress_bar)
callbacks.add_serialize_callback(checkpoint_schedule, checkpoint_model_path)

opt = RMSProp(decay_rate=0.997, learning_rate=0.0005, epsilon=1e-8,
              clip_gradients=True, gradient_limit=1.0)

# train model
model.fit(train_set, optimizer=opt, num_epochs=num_epochs, cost=cost, callbacks=callbacks)

# load model (if exited) and evaluate bleu score on test set
model.load_weights(checkpoint_model_path)
test_set = ImageCaptionTest(path=data_path)
sents, targets = test_set.predict(model)
test_set.bleu_score(sents, targets)
# setup model layers
layers = [
    Conv((5, 5, 16), init=init_norm, activation=Rectlin()),
    Pooling(2),
    Conv((5, 5, 32), init=init_norm, activation=Rectlin()),
    Pooling(2),
    Conv((3, 3, 32), init=init_norm, activation=Rectlin()),
    Pooling(2),
    Affine(nout=100, init=init_norm, activation=Rectlin()),
    Linear(nout=4, init=init_norm)
]

model = Model(layers=layers)

# cost = GeneralizedCost(costfunc=CrossEntropyBinary())
cost = GeneralizedCost(costfunc=SumSquared())

# fit and validate
optimizer = RMSProp()

# configure callbacks
callbacks = Callbacks(model, eval_set=eval_set, eval_freq=1)

model.fit(train_set, cost=cost, optimizer=optimizer, num_epochs=10, callbacks=callbacks)

y_test = model.get_outputs(test_set)
# model initialization
if rlayer_type == 'lstm':
    rlayer = LSTM(hidden_size, init, Logistic(), Tanh())
elif rlayer_type == 'gru':
    rlayer = GRU(hidden_size, init, activation=Tanh(), gate_activation=Logistic())
else:
    raise NotImplementedError('%s layer not implemented' % rlayer_type)

layers = [
    rlayer,
    Affine(len(train_set.vocab), init, bias=init, activation=Softmax())
]

cost = GeneralizedCost(costfunc=CrossEntropyMulti(usebits=True))
model = Model(layers=layers)
optimizer = RMSProp(clip_gradients=clip_gradients, stochastic_round=args.rounding)

# configure callbacks
callbacks = Callbacks(model, train_set, output_file=args.output_file,
                      valid_set=valid_set, valid_freq=args.validation_freq,
                      progress_bar=args.progress_bar)

# train model
model.fit(train_set, optimizer=optimizer, num_epochs=num_epochs,
          cost=cost, callbacks=callbacks)
from neon.optimizers import RMSProp
from neon.transforms import Misclassification
from neon.callbacks.callbacks import Callbacks
from network import create_network
from data import make_train_loader, make_val_loader

eval_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'whale_eval.cfg')
config_files = [eval_config] if os.path.exists(eval_config) else []
parser = NeonArgparser(__doc__, default_config_files=config_files)
args = parser.parse_args()

model, cost_obj = create_network()

assert 'train' in args.manifest, "Missing train manifest"
assert 'val' in args.manifest, "Missing val manifest"

train = make_train_loader(args.manifest['train'], args.manifest_root, model.be,
                          noise_file=args.manifest.get('noise'))

neon_logger.display('Performing train and test in validation mode')
val = make_val_loader(args.manifest['val'], args.manifest_root, model.be)
metric = Misclassification()

model.fit(dataset=train,
          cost=cost_obj,
          optimizer=RMSProp(learning_rate=1e-4),
          num_epochs=args.epochs,
          callbacks=Callbacks(model, eval_set=val, metric=metric, **args.callback_args))

neon_logger.display('Misclassification error = %.1f%%' % (model.eval(val, metric=metric) * 100))
                init, activation=Tanh(), gate_activation=Logistic())

layers = [
    rlayer1,
    rlayer2,
    Affine(len(train_set.vocab), init, bias=init, activation=Softmax())
]

cost = GeneralizedCost(costfunc=CrossEntropyMulti(usebits=True))
model = Model(layers=layers)

learning_rate_sched = Schedule(list(range(10, args.epochs)), .97)
optimizer = RMSProp(gradient_clip_value=gradient_clip_value,
                    stochastic_round=args.rounding,
                    schedule=learning_rate_sched)

# configure callbacks
callbacks = Callbacks(model, eval_set=valid_set, **args.callback_args)

# train model
model.fit(train_set, optimizer=optimizer, num_epochs=args.epochs,
          cost=cost, callbacks=callbacks)

# get predictions
ypred = model.get_outputs(valid_set)
shape = (valid_set.nbatches, args.batch_size, time_steps)