Example 1
                save_checkpoint(epoch, model, optimizer, prefix)

                val_acc, val_preci, val_recall, mAP_scores = evaluate(
                    epoch, val_loader, model, bce_loss, log_writer)
                avg_map = np.mean(mAP_scores)

                # Keep only the best checkpoint: when mAP improves, remove the
                # previously saved best weights before writing the new ones.
                if avg_map > best_mAP:
                    logger.info('mAP improved from {} to {}'.format(
                        best_mAP, avg_map))
                    best_mAP = avg_map
                    if last_model is not None:
                        os.remove(last_model)

                    fn = os.path.join(
                        c['model_dir'],
                        '{}_epoch{}_mAP{:.3}_preci{:.3}_recall{:.3}.pdparams'.
                        format(prefix, epoch, avg_map, val_preci, val_recall))
                    paddle.save(model.state_dict(), fn)
                    last_model = fn
                else:
                    logger.info(
                        f'mAP {avg_map} did not improve from {best_mAP}')

            # Periodic step-based learning-rate decay: use the coarse 0.1 factor
            # until the lr falls to 3e-6, then switch to the gentler 0.95 factor.
            if step % c['lr_dec_per_step'] == 0 and step != 0:
                if optimizer.get_lr() <= 3e-6:
                    factor = 0.95
                else:
                    factor = 0.1
                optimizer.set_lr(optimizer.get_lr() * factor)
                logger.info('decreased lr to {}'.format(optimizer.get_lr()))
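As a small aside, the learning-rate rule in the block above can be factored out into a helper. The sketch below is only illustrative: the decay_lr name, its defaults, and the toy optimizer are assumptions mirroring this snippet, not a shared utility from the project.

import paddle


def decay_lr(optimizer, floor=3e-6, coarse=0.1, fine=0.95):
    # Multiply the current lr by the coarse factor until it reaches the
    # floor, then switch to the gentler fine factor, as in the snippet above.
    lr = optimizer.get_lr()
    factor = fine if lr <= floor else coarse
    optimizer.set_lr(lr * factor)
    return optimizer.get_lr()


model = paddle.nn.Linear(4, 2)
opt = paddle.optimizer.Adam(learning_rate=1e-4, parameters=model.parameters())
print(decay_lr(opt))  # 1e-05: still above the floor, so the coarse 0.1 factor applies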
Example 2
                                      step=epoch,
                                      value=avg_map)
                log_writer.add_scalar(tag="eval auc",
                                      step=epoch,
                                      value=auc_score)
                log_writer.add_scalar(tag="eval dprime",
                                      step=epoch,
                                      value=dprime)

                model.train()
                model.clear_gradients()

                # Save a new checkpoint whenever the validation mAP improves.
                if avg_map > best_mAP:
                    print('mAP improved from {} to {}'.format(
                        best_mAP, avg_map))
                    best_mAP = avg_map
                    fn = os.path.join(
                        c['model_dir'],
                        f'{prefix}_epoch{epoch}_mAP{avg_map:.3}.pdparams')
                    paddle.save(model.state_dict(), fn)
                else:
                    print(f'mAP {avg_map} did not improve from {best_mAP}')

            # Periodic step-based learning-rate decay: multiply by 0.8 until the
            # lr falls to 1e-6, then by the gentler 0.95 factor.
            if step % c['lr_dec_per_step'] == 0 and step != 0:
                if optimizer.get_lr() <= 1e-6:
                    factor = 0.95
                else:
                    factor = 0.8
                optimizer.set_lr(optimizer.get_lr() * factor)
                print('decreased lr to {}'.format(optimizer.get_lr()))
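The dprime value logged above is computed elsewhere in this project. One common definition in AudioSet-style tagging evaluation derives it from the ROC AUC; the helper below is a sketch under that assumption, not the code actually used here.

import numpy as np
from scipy import stats


def d_prime(auc):
    # d' = sqrt(2) * Phi^{-1}(AUC), where Phi^{-1} is the inverse CDF
    # (percent-point function) of the standard normal distribution.
    return np.sqrt(2.0) * stats.norm.ppf(auc)


print(d_prime(0.95))  # roughly 2.33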
Example 3
            msg += f', lr:{optimizer.get_lr():.2}'
            msg += f', elapsed:{elapsed:.3}h'
            msg += f', remained:{remain:.3}h'

            if batch_id % config['log_step'] == 0 and local_rank == 0:
                logger.info(msg)

            # Periodically dump a full training checkpoint (model, loss-function
            # and optimizer state dicts plus the current lr) on rank 0 only.
            if step % config['checkpoint_step'] == 0 and local_rank == 0:
                fn = os.path.join(config['model_dir'],
                                  f'{prefix}_checkpoint_epoch{epoch}.tar')

                obj = {
                    'model': model.state_dict(),
                    'loss': loss_fn.state_dict(),
                    'opti': optimizer.state_dict(),
                    'lr': optimizer.get_lr()
                }
                paddle.save(obj, fn)

            # Evaluate periodically on rank 0 and track the best EER seen so far.
            if step != 0 and step % config['eval_step'] == 0 and local_rank == 0:

                result, min_dcf = compute_eer(config, model)
                eer = result.eer
                model.train()
                model.clear_gradients()

                if eer < best_eer:
                    logger.info('eer improved from {} to {}'.format(
                        best_eer, eer))
                    best_eer = eer
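compute_eer(config, model) is project-specific and its body is not shown in this fragment. As a rough illustration of the metric being tracked, one common way to estimate an equal error rate from verification scores and binary labels is sketched below; the helper name and the toy data are made up for illustration.

import numpy as np
from sklearn.metrics import roc_curve


def eer_from_scores(labels, scores):
    # The EER is the operating point where the false-acceptance rate (fpr)
    # equals the false-rejection rate (fnr = 1 - tpr) on the ROC curve.
    fpr, tpr, _ = roc_curve(labels, scores)
    fnr = 1.0 - tpr
    idx = np.nanargmin(np.abs(fnr - fpr))
    return (fpr[idx] + fnr[idx]) / 2.0


labels = [1, 1, 0, 1, 0, 0]
scores = [0.9, 0.8, 0.7, 0.6, 0.4, 0.2]
print(eer_from_scores(labels, scores))  # prints the crossover error rate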