def train_recsys_rating(net, train_iter, test_iter, loss, trainer, num_epochs,
                        devices=d2l.try_all_gpus(), evaluator=None, **kwargs):
    timer = d2l.Timer()
    animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs], ylim=[0, 2],
                            legend=['train loss', 'test RMSE'])
    for epoch in tqdm.tqdm(range(num_epochs)):
        metric, l = d2l.Accumulator(3), 0.
        for i, values in enumerate(train_iter):
            timer.start()
            input_data = []
            values = values if isinstance(values, list) else [values]
            for v in values:
                input_data.append(gluon.utils.split_and_load(v, devices))
            train_feat = input_data[0:-1] if len(values) > 1 else input_data
            train_label = input_data[-1]
            with autograd.record():
                preds = [net(*t) for t in zip(*train_feat)]
                ls = [loss(p, s) for p, s in zip(preds, train_label)]
            [l.backward() for l in ls]
            l += sum([l.asnumpy() for l in ls]).mean() / len(devices)
            trainer.step(values[0].shape[0])
            metric.add(l, values[0].shape[0], values[0].size)
            timer.stop()
        if len(kwargs) > 0:  # Used in the AutoRec section
            test_rmse = evaluator(net, test_iter, kwargs['inter_mat'],
                                  devices)
        else:
            test_rmse = evaluator(net, test_iter, devices)
        train_l = l / (i + 1)
        animator.add(epoch + 1, (train_l, test_rmse))
    print(f'train loss {metric[0] / metric[1]:.3f}, '
          f'test RMSE {test_rmse:.3f}')
    print(f'{metric[2] * num_epochs / timer.sum():.1f} examples/sec '
          f'on {str(devices)}')
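
# A minimal usage sketch for `train_recsys_rating` on explicit rating
# prediction. `MF`, `evaluator`, `train_iter`, `test_iter`, `num_users`, and
# `num_items` are assumptions standing in for the matrix factorization model,
# RMSE evaluator, and MovieLens iterators built in the corresponding section;
# the hyperparameters are illustrative only.
devices = d2l.try_all_gpus()
net = MF(30, num_users, num_items)  # hypothetical model class
net.initialize(ctx=devices, force_reinit=True, init=init.Normal(0.01))
loss = gluon.loss.L2Loss()
trainer = gluon.Trainer(net.collect_params(), 'adam',
                        {'learning_rate': 0.002, 'wd': 1e-5})
train_recsys_rating(net, train_iter, test_iter, loss, trainer, num_epochs=20,
                    devices=devices, evaluator=evaluator)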
def train_ch6(net, train_iter, test_iter, num_epochs, lr,
              device=d2l.try_gpu()):
    """Train a model with a GPU (defined in Chapter 6)."""
    net.initialize(force_reinit=True, ctx=device, init=init.Xavier())
    loss = gluon.loss.SoftmaxCrossEntropyLoss()
    trainer = gluon.Trainer(net.collect_params(), 'sgd',
                            {'learning_rate': lr})
    animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs],
                            legend=['train loss', 'train acc', 'test acc'])
    timer, num_batches = d2l.Timer(), len(train_iter)
    for epoch in range(num_epochs):
        # Sum of training loss, sum of training accuracy, no. of examples
        metric = d2l.Accumulator(3)
        for i, (X, y) in enumerate(train_iter):
            timer.start()
            # Here is the major difference from `d2l.train_epoch_ch3`
            X, y = X.as_in_ctx(device), y.as_in_ctx(device)
            with autograd.record():
                y_hat = net(X)
                l = loss(y_hat, y)
            l.backward()
            trainer.step(X.shape[0])
            metric.add(l.sum(), d2l.accuracy(y_hat, y), X.shape[0])
            timer.stop()
            train_l = metric[0] / metric[2]
            train_acc = metric[1] / metric[2]
            if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:
                animator.add(epoch + (i + 1) / num_batches,
                             (train_l, train_acc, None))
        test_acc = evaluate_accuracy_gpu(net, test_iter)
        animator.add(epoch + 1, (None, None, test_acc))
    print(f'loss {train_l:.3f}, train acc {train_acc:.3f}, '
          f'test acc {test_acc:.3f}')
    print(f'{metric[2] * num_epochs / timer.sum():.1f} examples/sec '
          f'on {str(device)}')
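
# A minimal usage sketch for `train_ch6` with a LeNet-style network on
# Fashion-MNIST; the architecture and hyperparameters below are illustrative
# assumptions rather than prescribed values.
from mxnet.gluon import nn

net = nn.Sequential()
net.add(nn.Conv2D(6, kernel_size=5, padding=2, activation='sigmoid'),
        nn.AvgPool2D(pool_size=2, strides=2),
        nn.Conv2D(16, kernel_size=5, activation='sigmoid'),
        nn.AvgPool2D(pool_size=2, strides=2),
        nn.Dense(120, activation='sigmoid'),
        nn.Dense(84, activation='sigmoid'),
        nn.Dense(10))
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size=256)
train_ch6(net, train_iter, test_iter, num_epochs=10, lr=0.9)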
def evaluate_loss(net, data_iter, loss):  #@save
    """Evaluate the loss of a model on the given dataset."""
    metric = d2l.Accumulator(2)  # Sum of losses, no. of examples
    for X, y in data_iter:
        l = loss(net(X), y)
        metric.add(l.sum(), l.size)
    return metric[0] / metric[1]
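
# A minimal sketch of calling `evaluate_loss`; the synthetic linear-regression
# data and the single-layer network below are illustrative assumptions.
from mxnet import np, npx
from mxnet.gluon import nn
npx.set_np()

features = np.random.normal(size=(100, 2))
labels = np.dot(features, np.array([2.0, -3.4])) + 4.2
data_iter = gluon.data.DataLoader(
    gluon.data.ArrayDataset(features, labels), batch_size=10)
net = nn.Sequential()
net.add(nn.Dense(1))
net.initialize(init.Normal(0.01))
print(evaluate_loss(net, data_iter, gluon.loss.L2Loss()))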
def train(net, train_iter, valid_iter, num_epochs, lr, wd, devices, lr_period,
          lr_decay):
    # Note: `loss` is read from the enclosing scope and passed on to
    # `d2l.train_batch_ch13`
    trainer = gluon.Trainer(net.collect_params(), 'sgd',
                            {'learning_rate': lr, 'momentum': 0.9, 'wd': wd})
    num_batches, timer = len(train_iter), d2l.Timer()
    animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs],
                            legend=['train loss', 'train acc', 'valid acc'])
    for epoch in range(num_epochs):
        metric = d2l.Accumulator(3)
        if epoch > 0 and epoch % lr_period == 0:
            trainer.set_learning_rate(trainer.learning_rate * lr_decay)
        for i, (features, labels) in enumerate(train_iter):
            timer.start()
            l, acc = d2l.train_batch_ch13(
                net, features, labels.astype('float32'), loss, trainer,
                devices, d2l.split_batch)
            metric.add(l, acc, labels.shape[0])
            timer.stop()
            if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:
                animator.add(epoch + (i + 1) / num_batches,
                             (metric[0] / metric[2], metric[1] / metric[2],
                              None))
        if valid_iter is not None:
            valid_acc = d2l.evaluate_accuracy_gpus(net, valid_iter,
                                                   d2l.split_batch)
            animator.add(epoch + 1, (None, None, valid_acc))
    if valid_iter is not None:
        print(f'loss {metric[0] / metric[2]:.3f}, '
              f'train acc {metric[1] / metric[2]:.3f}, '
              f'valid acc {valid_acc:.3f}')
    else:
        print(f'loss {metric[0] / metric[2]:.3f}, '
              f'train acc {metric[1] / metric[2]:.3f}')
    print(f'{metric[2] * num_epochs / timer.sum():.1f} examples/sec '
          f'on {str(devices)}')
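
# A minimal usage sketch for the CIFAR-10 Kaggle `train` function above.
# `train_iter` and `valid_iter` are assumed to be the Kaggle data loaders from
# that section, `d2l.resnet18` is assumed available as in the multi-GPU
# section, and `loss` must exist in the enclosing scope as noted above; the
# hyperparameter values are illustrative.
devices, num_epochs, lr, wd = d2l.try_all_gpus(), 20, 2e-4, 5e-4
lr_period, lr_decay = 4, 0.9
loss = gluon.loss.SoftmaxCrossEntropyLoss()
net = d2l.resnet18(10)  # small ResNet with 10 output classes
net.initialize(ctx=devices, init=init.Xavier())
train(net, train_iter, valid_iter, num_epochs, lr, wd, devices, lr_period,
      lr_decay)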
def evaluate_accuracy_gpu(net, data_iter, device=None):  #@save
    """Compute the accuracy for a model on a dataset using a GPU."""
    if not device:  # Query the device that the first parameter is on
        device = list(net.collect_params().values())[0].list_ctx()[0]
    # No. of correct predictions, no. of predictions
    metric = d2l.Accumulator(2)
    for X, y in data_iter:
        X, y = X.as_in_ctx(device), y.as_in_ctx(device)
        metric.add(d2l.accuracy(net(X), y), y.size)
    return metric[0] / metric[1]
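
# A quick sketch of `evaluate_accuracy_gpu` used on its own; `net` is assumed
# to be an already-initialized classifier and `test_iter` a matching test
# loader (for instance, the pair prepared for `train_ch6` above).
test_acc = evaluate_accuracy_gpu(net, test_iter)
print(f'test acc {test_acc:.3f}')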
def train_ranking(net, train_iter, test_iter, loss, trainer, test_seq_iter,
                  num_users, num_items, num_epochs, devices, evaluator,
                  candidates, eval_step=1):
    timer, hit_rate, auc = d2l.Timer(), 0, 0
    animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs], ylim=[0, 1],
                            legend=['test hit rate', 'test AUC'])
    for epoch in range(num_epochs):
        metric, l = d2l.Accumulator(3), 0.
        for i, values in enumerate(train_iter):
            # `values` holds the user IDs, the observed (positive) item IDs,
            # and the unobserved (negative) item IDs of a minibatch
            timer.start()
            input_data = []
            for v in values:
                input_data.append(gluon.utils.split_and_load(v, devices))
            with autograd.record():
                p_pos = [net(*t) for t in zip(*input_data[0:-1])]
                p_neg = [net(*t) for t in zip(*input_data[0:-2],
                                              input_data[-1])]
                ls = [loss(p, n) for p, n in zip(p_pos, p_neg)]
            [l.backward(retain_graph=False) for l in ls]
            l += sum([l.asnumpy() for l in ls]).mean() / len(devices)
            trainer.step(values[0].shape[0])
            metric.add(l, values[0].shape[0], values[0].size)
            timer.stop()
        with autograd.predict_mode():
            if (epoch + 1) % eval_step == 0:
                hit_rate, auc = evaluator(net, test_iter, test_seq_iter,
                                          candidates, num_users, num_items,
                                          devices)
                animator.add(epoch + 1, (hit_rate, auc))
    print(f'train loss {metric[0] / metric[1]:.3f}, '
          f'test hit rate {float(hit_rate):.3f}, test AUC {float(auc):.3f}')
    print(f'{metric[2] * num_epochs / timer.sum():.1f} examples/sec '
          f'on {str(devices)}')
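
# A minimal sketch of calling `train_ranking` for implicit feedback with a
# pairwise ranking loss. `net`, `train_iter`, `test_iter`, `candidates`,
# `num_users`, `num_items`, and the hit-rate/AUC `evaluator` are assumptions
# standing in for the objects built in the personalized-ranking sections;
# sequence-unaware models pass None for `test_seq_iter`.
devices, num_epochs = d2l.try_all_gpus(), 10
loss = d2l.BPRLoss()  # Bayesian personalized ranking loss saved earlier
trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': 0.01})
train_ranking(net, train_iter, test_iter, loss, trainer, None, num_users,
              num_items, num_epochs, devices, evaluator, candidates,
              eval_step=1)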
def train_bert(train_iter, net, loss, vocab_size, ctx, log_interval,
               num_steps):
    trainer = gluon.Trainer(net.collect_params(), 'adam',
                            {'learning_rate': 1e-3})
    step, timer = 0, d2l.Timer()
    animator = d2l.Animator(xlabel='step', ylabel='loss',
                            xlim=[1, num_steps], legend=['mlm', 'nsp'])
    # Sum of masked language modeling losses, sum of next sentence prediction
    # losses, no. of sentence pairs, count
    metric = d2l.Accumulator(4)
    num_steps_reached = False
    while step < num_steps and not num_steps_reached:
        for batch in train_iter:
            (tokens_X_shards, segments_X_shards, valid_lens_x_shards,
             pred_positions_X_shards, mlm_weights_X_shards, mlm_Y_shards,
             nsp_y_shards) = [
                gluon.utils.split_and_load(elem, ctx, even_split=False)
                for elem in batch]
            timer.start()
            with autograd.record():
                mlm_ls, nsp_ls, ls = _get_batch_loss_bert(
                    net, loss, vocab_size, tokens_X_shards, segments_X_shards,
                    valid_lens_x_shards, pred_positions_X_shards,
                    mlm_weights_X_shards, mlm_Y_shards, nsp_y_shards)
            for l in ls:
                l.backward()
            trainer.step(1)
            mlm_l_mean = sum([float(l) for l in mlm_ls]) / len(mlm_ls)
            nsp_l_mean = sum([float(l) for l in nsp_ls]) / len(nsp_ls)
            metric.add(mlm_l_mean, nsp_l_mean, batch[0].shape[0], 1)
            timer.stop()
            if (step + 1) % log_interval == 0:
                animator.add(step + 1,
                             (metric[0] / metric[3], metric[1] / metric[3]))
            step += 1
            if step == num_steps:
                num_steps_reached = True
                break
    print(f'MLM loss {metric[0] / metric[3]:.3f}, '
          f'NSP loss {metric[1] / metric[3]:.3f}')
    print(f'{metric[2] / timer.sum():.1f} sentence pairs/sec on {str(ctx)}')
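
# A minimal sketch for `train_bert`, assuming the WikiText-2 loader, the small
# BERT model, and the `_get_batch_loss_bert` helper from the pretraining
# section are available; the model configuration, batch size, and step counts
# below are illustrative assumptions.
batch_size, max_len = 512, 64
train_iter, vocab = d2l.load_data_wiki(batch_size, max_len)
devices = d2l.try_all_gpus()
net = d2l.BERTModel(len(vocab), num_hiddens=128, ffn_num_hiddens=256,
                    num_heads=2, num_layers=2, dropout=0.2)
net.initialize(init.Xavier(), ctx=devices)
loss = gluon.loss.SoftmaxCrossEntropyLoss()
train_bert(train_iter, net, loss, len(vocab), devices, log_interval=20,
           num_steps=50)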
def train(net, train_iter, valid_iter, num_epochs, lr, wd, devices, lr_period,
          lr_decay):
    # Only train the small custom output network; `loss` and `batch_size` are
    # read from the enclosing scope
    trainer = gluon.Trainer(net.output_new.collect_params(), 'sgd',
                            {'learning_rate': lr, 'momentum': 0.9, 'wd': wd})
    num_batches, timer = len(train_iter), d2l.Timer()
    animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs],
                            legend=['train loss', 'valid loss'])
    for epoch in range(num_epochs):
        print(f'Epoch: {epoch + 1}')
        metric = d2l.Accumulator(2)
        if epoch > 0 and epoch % lr_period == 0:
            trainer.set_learning_rate(trainer.learning_rate * lr_decay)
        for i, (features, labels) in enumerate(train_iter):
            timer.start()
            X_shards, y_shards = d2l.split_batch(features, labels, devices)
            output_features = [net.features(X_shard) for X_shard in X_shards]
            with autograd.record():
                outputs = [net.output_new(feature)
                           for feature in output_features]
                ls = [loss(output, y_shard).sum()
                      for output, y_shard in zip(outputs, y_shards)]
            for l in ls:
                l.backward()
            trainer.step(batch_size)
            metric.add(sum([float(l.sum()) for l in ls]), labels.shape[0])
            timer.stop()
            if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:
                animator.add(epoch + (i + 1) / num_batches,
                             (metric[0] / metric[1], None))
        if valid_iter is not None:
            valid_loss = evaluate_loss(valid_iter, net, devices)
            animator.add(epoch + 1, (None, valid_loss))
    if valid_iter is not None:
        print(f'train loss {metric[0] / metric[1]:.3f}, '
              f'valid loss {valid_loss:.3f}')
    else:
        print(f'train loss {metric[0] / metric[1]:.3f}')
    print(f'{metric[1] * num_epochs / timer.sum():.1f} examples/sec '
          f'on {str(devices)}')
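
# A minimal sketch for the fine-tuning `train` function above. `get_net`
# stands in for the hypothetical helper that wraps a pretrained backbone with
# a fresh `output_new` head, `train_iter` and `valid_iter` for the Kaggle dog
# data loaders, and the multi-device `evaluate_loss(data_iter, net, devices)`
# used here is the variant defined in that section; `batch_size` and `loss`
# must exist in the enclosing scope because the function body reads them
# directly. All values are illustrative.
devices, batch_size = d2l.try_all_gpus(), 128
loss = gluon.loss.SoftmaxCrossEntropyLoss()
net = get_net(devices)  # hypothetical: frozen pretrained features + new head
num_epochs, lr, wd = 10, 5e-3, 1e-4
lr_period, lr_decay = 2, 0.9
train(net, train_iter, valid_iter, num_epochs, lr, wd, devices, lr_period,
      lr_decay)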