def test_adagrad(backend_default):
    ada = Adagrad()

    # random parameters, gradients, and accumulated squared-gradient state
    param = np.random.rand(200, 128)
    param2 = copy.deepcopy(param)
    grad = 0.01 * np.random.rand(200, 128)
    grad2 = grad / 128.  # mirrors the optimizer's internal scaling by the batch size (128)
    states = [0.01 * np.random.rand(200, 128)]
    states2 = [copy.deepcopy(states[0])]

    # expected Adagrad update, computed with NumPy
    states2[0][:] = states2[0] + np.square(grad2)
    denom = np.sqrt(states2[0] + ada.epsilon)
    param2[:] -= grad2 * float(ada.learning_rate) / denom

    # run the optimizer on the wrapped tensors and compare against the expected values
    param_list = [((wrap(param), wrap(grad)), [wrap(states[0])])]
    compare_tensors(ada, param_list, param2, tol=1e-7)
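# A minimal standalone NumPy sketch of the rule the test above verifies
# (illustrative only, not neon source): Adagrad keeps a per-parameter running
# sum of squared gradients and scales each step by 1 / sqrt(sum + epsilon),
# so frequently updated weights take progressively smaller steps.
import numpy as np

def adagrad_step(param, grad, state, learning_rate=0.01, epsilon=1e-6):
    """One Adagrad update; `state` accumulates squared gradients in place."""
    state += np.square(grad)
    param -= learning_rate * grad / np.sqrt(state + epsilon)
    return param, state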
        Dropout(0.8),
        Conv((3, 3, 128), init=gauss, strides=small, **common),
        Pooling(2, strides=2),
        Dropout(0.4),
        Conv((3, 3, 256), init=gauss, strides=small, **common),
        Dropout(0.2),
        Conv((2, 2, 512), init=gauss, strides=tiny, **common),
        Conv((2, 2, 128), init=gauss, strides=tiny, **common),
        DeepBiRNN(64, init=glorot, reset_cells=True, depth=5, **common),
        RecurrentMean(),
        Affine(nout=2, init=gauss, activation=Softmax())
    ]
}[subj]

model = Model(layers=layers)
opt = Adagrad(learning_rate=rate)
callbacks = Callbacks(model, eval_set=test, **args.callback_args)

if args.validate_mode:
    evaluator = Evaluator(subj, data_dir, test)
    callbacks.add_callback(evaluator)
    preds_name = 'eval.'
else:
    preds_name = 'test.'

cost = GeneralizedCost(costfunc=CrossEntropyBinary())
model.fit(tain, optimizer=opt, num_epochs=nepochs, cost=cost, callbacks=callbacks)
preds = model.get_outputs(test)[:, 1]
                depth=1, reset_cells=True, batch_norm=True)

layers = [
    LookupTable(vocab_size=vocab_size, embedding_dim=embedding_dim, init=uni),
    rlayer,
    RecurrentSum(),
    Dropout(keep=0.5),
    Affine(2, g_uni, bias=g_uni, activation=Softmax())
]

model = Model(layers=layers)
cost = GeneralizedCost(costfunc=CrossEntropyMulti(usebits=True))
optimizer = Adagrad(learning_rate=0.01, gradient_clip_value=gradient_clip_value)

# configure callbacks
callbacks = Callbacks(model, eval_set=valid_set, **args.callback_args)

# train model
model.fit(train_set, optimizer=optimizer, num_epochs=args.epochs, cost=cost,
          callbacks=callbacks)

# eval model
print "Train Accuracy - ", 100 * model.eval(train_set, metric=Accuracy())
print "Test Accuracy - ", 100 * model.eval(valid_set, metric=Accuracy())
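# Several of these examples pass gradient_clip_value to Adagrad. A rough NumPy
# illustration of element-wise value clipping (an assumption about the
# behavior, not neon's actual implementation): each gradient entry is bounded
# to [-clip_value, +clip_value] before the Adagrad update is applied.
import numpy as np

def clip_gradient_value(grad, clip_value=None):
    """Element-wise clip; clip_value=None leaves the gradient unchanged."""
    if clip_value is None:
        return grad
    return np.clip(grad, -clip_value, clip_value)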
common_params = dict(sampling_freq=22050, clip_duration=16000, frame_duration=16)
train_params = AudioParams(**common_params)
valid_params = AudioParams(**common_params)
common = dict(target_size=1, nclasses=10, repo_dir=args.data_dir)
train = DataLoader(set_name='music-train', media_params=train_params,
                   index_file=train_idx, shuffle=True, **common)
valid = DataLoader(set_name='music-valid', media_params=valid_params,
                   index_file=valid_idx, shuffle=False, **common)

init = Gaussian(scale=0.01)
layers = [Conv((2, 2, 4), init=init, activation=Rectlin(),
               strides=dict(str_h=2, str_w=4)),
          Pooling(2, strides=2),
          Conv((3, 3, 4), init=init, batch_norm=True, activation=Rectlin(),
               strides=dict(str_h=1, str_w=2)),
          DeepBiRNN(128, init=GlorotUniform(), batch_norm=True,
                    activation=Rectlin(), reset_cells=True, depth=3),
          RecurrentMean(),
          Affine(nout=common['nclasses'], init=init, activation=Softmax())]

model = Model(layers=layers)
opt = Adagrad(learning_rate=0.01, gradient_clip_value=15)
metric = Misclassification()
callbacks = Callbacks(model, eval_set=valid, metric=metric, **args.callback_args)
cost = GeneralizedCost(costfunc=CrossEntropyMulti())
model.fit(train, optimizer=opt, num_epochs=args.epochs, cost=cost,
          callbacks=callbacks)
print('Misclassification error = %.1f%%' % (model.eval(valid, metric=metric)*100))
display(model, ['Convolution_0'], 'inputs')
display(model, ['Convolution_0', 'Convolution_1', 'Pooling_0'], 'outputs')
tiny = dict(str_h=1, str_w=1)
small = dict(str_h=1, str_w=2)
big = dict(str_h=1, str_w=4)
common = dict(batch_norm=True, activation=Rectlin())
layers = [Conv((3, 5, 64), init=gauss, activation=Rectlin(), strides=big),
          Pooling(2, strides=2),
          Conv((3, 3, 128), init=gauss, strides=small, **common),
          Pooling(2, strides=2),
          Conv((3, 3, 256), init=gauss, strides=small, **common),
          Conv((2, 2, 512), init=gauss, strides=tiny, **common),
          DeepBiRNN(128, init=glorot, reset_cells=True, depth=3, **common),
          RecurrentMean(),
          Affine(nout=2, init=gauss, activation=Softmax())]

model = Model(layers=layers)
opt = Adagrad(learning_rate=0.0001)
callbacks = Callbacks(model, eval_set=test, **args.callback_args)
cost = GeneralizedCost(costfunc=CrossEntropyBinary())
model.fit(tain, optimizer=opt, num_epochs=args.epochs, cost=cost,
          callbacks=callbacks)
preds = model.get_outputs(test)[:, 1]

if args.test_mode:
    preds_name = 'test.'
else:
    preds_name = 'eval.'

labels = np.loadtxt(test_idx, delimiter=',', skiprows=1, usecols=[1])
auc = metrics.roc_auc_score(labels, preds)
print('Eval AUC for subject %d: %.4f' % (subj, auc))
preds_file = preds_name + str(subj) + '.' + str(args.electrode) + '.npy'
layers = [
    LookupTable(vocab_size=vocab_size, embedding_dim=embedding_dim, init=init_emb),
    LSTM(hidden_size, init_glorot, activation=Tanh(), gate_activation=Logistic(),
         reset_cells=True),
    RecurrentSum(),
    Dropout(keep=0.5),
    Affine(2, init_glorot, bias=init_glorot, activation=Softmax())
]

cost = GeneralizedCost(costfunc=CrossEntropyMulti(usebits=True))
metric = Accuracy()
model = Model(layers=layers)
optimizer = Adagrad(learning_rate=0.01, clip_gradients=clip_gradients)

# configure callbacks
callbacks = Callbacks(model, train_set, args, valid_set=valid_set)

# train model
model.fit(train_set, optimizer=optimizer, num_epochs=num_epochs, cost=cost,
          callbacks=callbacks)

# eval model
print "Test Accuracy - ", 100 * model.eval(valid_set, metric=metric)
# define layers
layers = [
    LookupTable(vocab_size=vocab_size, embedding_dim=embedding_dim, init=init_emb,
                pad_idx=0, update=embedding_update),
    LSTM(hidden_size, init_glorot, activation=Tanh(), gate_activation=Logistic(),
         reset_cells=True),
    RecurrentSum(),
    Dropout(keep=0.5),
    Affine(nclass, init_glorot, bias=init_glorot, activation=Softmax())
]

# set the cost, metrics, optimizer
cost = GeneralizedCost(costfunc=CrossEntropyMulti(usebits=True))
metric = Accuracy()
model = Model(layers=layers)
optimizer = Adagrad(learning_rate=0.01)

# configure callbacks
callbacks = Callbacks(model, eval_set=valid_set, **args.callback_args)

# train model
model.fit(train_set, optimizer=optimizer, num_epochs=num_epochs, cost=cost,
          callbacks=callbacks)

# eval model
print "\nTrain Accuracy -", 100 * model.eval(train_set, metric=metric)
print "Test Accuracy -", 100 * model.eval(valid_set, metric=metric)
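# The snippets above omit their import preambles. A typical set of imports for
# neon 1.x that covers the names used here (assumed; check your neon version's
# module layout):
from neon.initializers import Gaussian, GlorotUniform, Uniform
from neon.layers import (Affine, Conv, DeepBiRNN, Dropout, GeneralizedCost,
                         LookupTable, LSTM, Pooling, RecurrentMean,
                         RecurrentSum)
from neon.models import Model
from neon.optimizers import Adagrad
from neon.transforms import (Accuracy, CrossEntropyBinary, CrossEntropyMulti,
                             Logistic, Misclassification, Rectlin, Softmax,
                             Tanh)
from neon.callbacks.callbacks import Callbacks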