Code Example #1
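                                # Cross-entropy loss and argmax class predictions for this eval minibatch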
                                loss = F.cross_entropy(logits, target)
                                pred = logits.argmax(dim=1, keepdim=True)

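                                # Accumulate per-batch accuracy, classification loss, and reconstruction (MSE) loss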
                                LOG_temp['acc'] += [
                                    pred.eq(target.view_as(pred)).sum().item()
                                    / pred.size(0)
                                ]
                                LOG_temp['cls_loss'] += [loss.item()]
                                LOG_temp['gen_loss'] += [
                                    F.mse_loss(x_recon, data).item()
                                ]

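                            # Average the metrics over the eval batches and log them for this (task, task_t) pair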
                            current_acc[task_t] = mean_fn(LOG_temp['acc'])

                            logging_per_task(
                                wandb, LOG, run, mode, 'acc', task, task_t,
                                np.round(np.mean(LOG_temp['acc']), 2))
                            logging_per_task(
                                wandb, LOG, run, mode, 'cls_loss', task,
                                task_t,
                                np.round(np.mean(LOG_temp['cls_loss']), 2))
                            logging_per_task(
                                wandb, LOG, run, mode, 'gen_loss', task,
                                task_t,
                                np.round(np.mean(LOG_temp['gen_loss']), 2))

                        print('\n{}:'.format(mode))
                        print(LOG[run][mode]['acc'])

                        # store the best accuracy seen so far across all tasks
                        best_acc_yet[mode] = np.maximum(
Code Example #2
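                                # Generative-loss inputs: reconstructed mean, input data, posterior mean/variance,
                                # and (presumably) normalizing-flow latents z0, zk with log-det-Jacobian ldj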
                                x_mean,
                                data,
                                z_mu,
                                z_var,
                                z0,
                                zk,
                                ldj,
                                args,
                                beta=args.beta)
                            LOG_temp['gen_loss'] += [gen_loss.item()]

                    # End Minibatch Eval Loop
                    #-------------------

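                    # Log the per-task mean of each metric gathered over the eval minibatches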
                    logging_per_task(wandb, LOG, run, mode, 'acc', task,
                                     task_t,
                                     np.round(np.mean(LOG_temp['acc']), 2))
                    logging_per_task(
                        wandb, LOG, run, mode, 'cls_loss', task, task_t,
                        np.round(np.mean(LOG_temp['cls_loss']), 2))
                    logging_per_task(
                        wandb, LOG, run, mode, 'gen_loss', task, task_t,
                        np.round(np.mean(LOG_temp['gen_loss']), 2))

                # End Task Eval Loop
                #-------------------

                print('\n{}:'.format(mode))
                print(LOG[run][mode]['acc'])

            # End Eval Loop