Example #1

            # step 1. zero the gradients
            optimizer.zero_grad()

            # step 2. compute the output
            y_pred = classifier(x_surname=batch_dict["x_surname"])

            # step 3. compute the loss
            loss = loss_func(y_pred, batch_dict["y_nationality"])
            loss_batch = loss.item()
            running_loss += (loss_batch - running_loss) / (batch_index + 1)

            # step 4. use loss to produce gradients
            loss.backward()

            # step 5. use optimizer to take gradient step
            optimizer.step()

            # -------------------------------------------
            # compute the accuracy
            acc_batch = helper.compute_accuracy(y_pred, batch_dict["y_nationality"])
            running_acc += (acc_batch - running_acc) / (batch_index + 1)

            # update bar
            train_bar.set_postfix(loss=running_loss,
                                  acc=running_acc,
                                  epoch=epoch_index)
            train_bar.update()

        train_state["train_loss"].append(running_loss)
        train_state["train_acc"].append(running_acc)

        #
        # Iterate over val dataset
        #
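
A note on helper.compute_accuracy, which both classifier examples call but
which is not defined in this listing: for a multi-class classifier it is
typically "fraction of argmax predictions that match the integer targets".
A minimal sketch under that assumption (the helper name and signature are
taken from the call sites; the body is illustrative, not the listing's
actual implementation):

    import torch

    def compute_accuracy(y_pred, y_true):
        # pick the highest-scoring class for each example in the batch
        _, y_pred_indices = y_pred.max(dim=1)
        # count predictions that match the integer class targets
        n_correct = torch.eq(y_pred_indices, y_true).sum().item()
        # return a percentage, matching how running_acc is displayed
        return n_correct / len(y_pred_indices) * 100
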
Example #2

            # step 1. zero the gradients
            optimizer.zero_grad()

            # step 2. compute the output
            y_pred = classifier(x_in=batch_dict["x_data"].float())

            # step 3. compute the loss
            loss = loss_func(y_pred, batch_dict["y_target"].float())
            loss_batch = loss.item()
            running_loss += (loss_batch - running_loss) / (batch_index + 1)

            # step 4. use loss to produce gradients
            loss.backward()

            # step 5. use optimizer to take gradient step
            optimizer.step()

            # -------------------------------------------
            # compute the accuracy
            acc_batch = helper.compute_accuracy(y_pred, batch_dict["y_target"])
            running_acc += (acc_batch - running_acc) / (batch_index + 1)

            # update bar
            train_bar.set_postfix(loss=running_loss,
                                  acc=running_acc,
                                  epoch=epoch_index)
            train_bar.update()

        train_state["train_loss"].append(running_loss)
        train_state["train_acc"].append(running_acc)

        #
        # Iterate over val dataset
        #
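
Both classifier examples track loss and accuracy with the same update,
running += (value - running) / (batch_index + 1). That is the incremental
form of the cumulative mean over all batches seen so far, so no separate
sum or count has to be kept. A quick standalone check of the identity
(the values here are made up for illustration):

    values = [4.0, 2.0, 6.0]
    running = 0.0
    for i, v in enumerate(values):
        running += (v - running) / (i + 1)
    print(running)                     # 4.0
    print(sum(values) / len(values))   # 4.0, the same result
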
Example #3

            # step 1. zero the gradients
            optimizer.zero_grad()

            # step 2. compute the output (the opening `if` of this branch was
            # truncated in the source listing; a scheduled-sampling guard is
            # assumed here)
            if sample_probability is not None:
                y_pred = model(batch_dict["x_source"],
                               batch_dict["x_source_length"],
                               output_sequence_size=vectorizer.max_target_length + 1,
                               sample_probability=sample_probability)
            else:
                y_pred = model(batch_dict["x_source"],
                               batch_dict["x_source_length"],
                               batch_dict["x_target"])

            loss = helper.sequence_loss(y_pred, batch_dict["y_target"], mask_index)
            loss.backward()
            optimizer.step()

            running_loss += (loss.item() - running_loss) / (batch_index + 1)

            acc_t = helper.compute_accuracy(y_pred, batch_dict["y_target"], mask_index)
            running_acc += (acc_t - running_acc) / (batch_index + 1)

            train_bar.set_postfix(loss=running_loss,
                                  acc=running_acc,
                                  epoch=epoch_index)
            train_bar.update()

        train_state["train_loss"].append(running_loss)
        train_state["train_acc"].append(running_acc)

        dataset.set_split("val")
        batch_generator = generate_nmt_batches(dataset,
                                               batch_size=args.batch_size,
                                               device=args.device)
        running_loss = 0.0
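
Example #3 leans on helper.sequence_loss, which is not defined in this
listing either. For sequence output of shape (batch, seq_len, vocab) it is
typically per-step cross-entropy with padded positions excluded via
mask_index; a minimal sketch under that assumption (the flattening and the
ignore_index handling are illustrative, not the listing's actual code):

    import torch.nn.functional as F

    def sequence_loss(y_pred, y_true, mask_index):
        # flatten (batch, seq_len, vocab) -> (batch * seq_len, vocab)
        y_pred = y_pred.contiguous().view(-1, y_pred.size(2))
        # flatten (batch, seq_len) -> (batch * seq_len,)
        y_true = y_true.contiguous().view(-1)
        # ignore_index keeps padded steps out of the averaged loss
        return F.cross_entropy(y_pred, y_true, ignore_index=mask_index)

The compute_accuracy call in Example #3 takes the same mask_index for the
same reason: padded time steps should not count toward accuracy.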