Example #1
import datetime

import discord
from discord.ext import commands
from esipy import EsiApp
from tortoise import Tortoise

import settings  # assumed: project-local module exposing TORTOISE_ORM
from utils.loggers import get_logger


# The original snippet starts inside a class body; the imports above and the
# class line below are assumed context for a commands.Bot subclass (implied by
# the super().__init__ call further down). The class name is hypothetical.
class Bot(commands.Bot):
    def __init__(self, config, *args, **kwargs):
        self.config = config
        intents = discord.Intents.default()
        intents.members = True
        intents.presences = True
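        # esipy: fetch the latest EVE Online ESI swagger spec for later API calls.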
        app = EsiApp()
        self.esi_app = app.get_latest_swagger

        self.description = "A discord.py bot to do some stuff."

        self.token = config['bot']['token']
        self.prefix = config['bot']['prefix']
        self.started = datetime.datetime.utcnow()

        self.logger = get_logger(__name__)

        super().__init__(command_prefix=self.prefix,
                         description=self.description,
                         pm_help=None,
                         activity=discord.Activity(
                             name=config['bot']['status'],
                             type=discord.ActivityType.playing),
                         status=discord.Status.idle,
                         intents=intents,
                         *args,
                         **kwargs)

        self.loop.create_task(Tortoise.init(
            config=settings.TORTOISE_ORM))  # Connect to the database.

        # Load extensions
        try:
            self.load_extension('cogs.core.cog')
        except Exception as e:
            self.logger.fatal("Core cog failed to load. Exception:")
            self.logger.fatal(e)
            print(
                "Core cog could not be loaded. Please check the logs for more information."
            )

            exit(1)

        for extension in self.config['bot']['extensions']:
            try:
                self.load_extension(f'cogs.{extension}.cog')
            except Exception as e:
                self.logger.critical(f"{extension} failed to load. Exception:")
                self.logger.critical(e)
                print(
                    f"{extension} failed to load. Check logs for more details."
                )
            else:
                self.logger.info(f'{extension} loaded.')
                print(f"{extension} loaded successfully.")
Example #2
def main():
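    # Module-level imports and globals referenced below (parser, yaml, os, time,
    # numpy as np, torch, dist, loggers, SummaryWriter, device, best_top1, ...)
    # are defined outside this snippet.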
    global args, use_gpu, writer, rank, logger, best_top1, world_size
    args = parser.parse_args()
    with open(args.config) as f:
        config = yaml.safe_load(f)

    #######  visualize configs ######
    visualize_configurations(config)
    #######  set args ######
    for key in config:
        for k, v in config[key].items():
            setattr(args, k, v)
    if args.verbose:
        print('Config parsing complete')

    #######  world initial ######
    if args.distributed:
        rank, world_size = dist.dist_init(args.port, 'nccl')
        # The logger must exist on every rank, since it is used unconditionally below.
        logger = loggers.get_logger(
            os.path.join(args.logpath,
                         '{}.distlog'.format(args.task_name)))
        if rank == 0:
            tbpath = os.path.join(args.logpath, 'tb', args.task_name)
            os.makedirs(tbpath, exist_ok=True)
            writer = SummaryWriter(log_dir=tbpath)
            writer.add_text('config_information', transfer_txt(args))
            logger.info("Logger is set")
            logger.info("Logger with distribution")
    else:
        tbpath = os.path.join(args.logpath, 'tb', args.task_name)
        os.makedirs(tbpath, exist_ok=True)
        writer = SummaryWriter(log_dir=tbpath)
        writer.add_text('config_information', transfer_txt(args))
        logger = loggers.get_logger(
            os.path.join(args.logpath, '{}.log'.format(args.task_name)))
        logger.info("Logger is set")
        logger.info("Logger without distribution")

    ######## initial random setting #######

    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    torch.backends.cudnn.benchmark = True

    ######## test data reading ########

    since = time.time()
    dataset_train_val = base_dataset.baseline_dataset(args)
    train_loader, val_loader = dataset_train_val.get_loader()
    logger.info(
        "Dataset initialization took {:.2f} s".format(time.time() - since))

    logger.info("The training classes labels length :  {}".format(
        len(dataset_train_val.train_classnames)))
    since = time.time()
    inputs, classes = next(iter(train_loader))
    logger.info('Example batch loading time: {:.2f} s'.format(time.time() - since))

    ######### Init model ############
    if args.model_name == 'resnet50_middle':
        model = baseline_cls.resnet50_middle(
            len(dataset_train_val.train_classnames),
            droprate=args.dropoutrate,
            pretrain=args.pretrain,
            return_f=args.reture_bottleneck_feature,
            return_mid=args.return_middle_level_feature)
    else:
        model = baseline_cls.PCB(len(dataset_train_val.train_classnames))

    #logger.info(model)
    if args.PCB:
        model = baseline_cls.PCB(len(dataset_train_val.train_classnames))

    ########## launch training ###########

    woptimizer = optimizers.get_optimizer(args, model)
    lr_scheduler = optimizers.get_lr_scheduler(args, woptimizer)
    criterion = losses.get_loss(args)

    if args.resume != '' and os.path.isfile(args.resume):
        if args.distributed:
            if rank == 0:
                print('resume from [%s]' % args.resume)
            checkpoint = torch.load(args.resume,
                                    map_location='cuda:%d' %
                                    torch.cuda.current_device())
        else:
            print('resume from [%s]' % args.resume)
            checkpoint = torch.load(args.resume, map_location="cpu")

        model.load_state_dict(checkpoint['network'])
        #woptimizer.load_state_dict(checkpoint['optimizer'])
        #lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
        epoch_offset = checkpoint['epoch']
    else:
        epoch_offset = 0

    model.to(device)
    if args.distributed:
        dist.sync_state(model)

    if args.fp16:
        model, woptimizer = amp.initialize(model, woptimizer, opt_level="O1")

    for epoch in range(epoch_offset, args.epochs):

        # train
        train(args,
              train_loader,
              val_loader,
              model,
              woptimizer,
              lr_scheduler,
              epoch=epoch,
              criterion=criterion)

        # validation
        cur_step = (epoch + 1) * len(train_loader)
        top1 = validate(args,
                        val_loader,
                        model,
                        epoch=epoch,
                        cur_step=cur_step,
                        criterion=criterion)

        if args.distributed:
            if rank == 0:
                if best_top1 < top1:
                    best_top1 = top1
                    save_network(args, model, epoch, top1, isbest=True)
                else:
                    if epoch % args.forcesave == 0:
                        save_network(args, model, epoch, top1)
                writer.add_scalar('val/best_top1', best_top1, cur_step)

        else:
            if best_top1 < top1:
                best_top1 = top1
                save_network(args, model, epoch, top1, isbest=True)
            else:
                if epoch % args.forcesave == 0:
                    save_network(args, model, epoch, top1)

            writer.add_scalar('val/best_top1', best_top1, cur_step)

        if args.distributed:
            if rank == 0:
                logger.info("Final best Prec@1 = {:.4%}".format(best_top1))
                #logger.info("Best Genotype = {}".format(best_genotype))
        else:
            logger.info("Final best Prec@1 = {:.4%}".format(best_top1))
Example #3
import traceback

import discord
from discord.ext import commands
from datetime import datetime
from pathlib import Path

from .models import *
from utils import checks
from utils.loggers import get_logger

logger = get_logger(__name__)


class AdminCommands(commands.Cog):
    def __init__(self, bot):
        self.bot = bot

    @commands.command(aliases=['sd'], hidden=True)
    @commands.is_owner()
    async def shutdown(self, ctx):
        """
        Commands the bot to shut down.
        """
        logger.critical(
            f"Bot shutdown called by {ctx.author.name}#{ctx.author.discriminator} ({ctx.author.id})"
        )
        print(
            f"Bot shutdown called by {ctx.author.name}#{ctx.author.discriminator} ({ctx.author.id})"
        )
        await ctx.send("Bot shutting down.")
Example #4
def main():
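    # Module-level imports and globals referenced below (parser, yaml, os, time,
    # numpy as np, torch, dist, loggers, SummaryWriter, SearchCNNController,
    # Architect, plot, device, best_top1, ...) are defined outside this snippet.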
    global args, use_gpu, writer, rank, logger, best_top1, world_size
    args = parser.parse_args()
    with open(args.config) as f:
        config = yaml.safe_load(f)

    #######  visualize configs ######
    visualize_configurations(config)
    #######  set args ######
    for key in config:
        for k, v in config[key].items():
            setattr(args, k, v)
    if args.verbose:
        print('Config parsing complete')

    #######  world initial ######
    if args.distributed:
        rank, world_size = dist.dist_init(args.port, 'nccl')
        logger = loggers.get_logger(
            os.path.join(args.logpath, '{}.distlog'.format(args.task_name)))
        if rank == 0:
            tbpath = os.path.join(args.logpath, 'tb', args.task_name)
            os.makedirs(tbpath, exist_ok=True)
            writer = SummaryWriter(log_dir=tbpath)
            writer.add_text('config_information', transfer_txt(args))

            logger.info("Logger is set")
            logger.info("Logger with distribution")
    else:
        tbpath = os.path.join(args.logpath, 'tb', args.task_name)
        os.makedirs(tbpath, exist_ok=True)
        writer = SummaryWriter(log_dir=tbpath)
        writer.add_text('config_information', transfer_txt(args))
        logger = loggers.get_logger(
            os.path.join(args.logpath, '{}.log'.format(args.task_name)))
        logger.info("Logger is set")
        logger.info("Logger without distribution")

    ######## initial random setting #######

    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    torch.backends.cudnn.benchmark = True

    ######## test data reading ########

    since = time.time()
    dataset_train_val = base_dataset.baseline_dataset(args)
    train_loader, val_loader = dataset_train_val.get_loader()
    logger.info(
        "Dataset initialization took {:.2f} s".format(time.time() - since))

    logger.info("The training classes labels length :  {}".format(
        len(dataset_train_val.train_classnames)))
    since = time.time()
    inputs, classes = next(iter(train_loader))
    logger.info('Example batch loading time: {:.2f} s'.format(time.time() - since))

    ######### Init model ############
    #woptimizer =  optimizers.get_optimizer(args, model)
    #lr_schedular = optimizers.get_lr_scheduler(args, woptimizer)
    criterion = losses.get_loss(args)

    criterion.to(device)

    # Note: both branches below currently build the same SearchCNNController;
    # the else branch acts as the default fallback.
    if args.model_name == 'Darts_normal':
        model = SearchCNNController(args.input_channels, args.init_channels,
                                    len(dataset_train_val.train_classnames),
                                    args.Search_layers, criterion)
    else:
        model = SearchCNNController(args.input_channels, args.init_channels,
                                    len(dataset_train_val.train_classnames),
                                    args.Search_layers, criterion)

    model = model.to(device)
    if args.distributed:
        dist.sync_state(model)

    # DARTS-style bi-level setup: SGD optimizes the network weights, while Adam
    # (below) optimizes the architecture parameters (alphas).
    w_optim = torch.optim.SGD(model.weights(),
                              args.w_lr,
                              momentum=args.w_momentum,
                              weight_decay=args.w_weight_decay)

    alpha_optim = torch.optim.Adam(model.alphas(),
                                   args.alpha_lr,
                                   betas=(0.5, 0.999),
                                   weight_decay=args.alpha_weight_decay)

    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        w_optim, args.epochs, eta_min=args.w_lr_min)
    architect = Architect(model, args.w_momentum, args.w_weight_decay, args)

    ########## launch training ###########

    if args.resume != '' and os.path.isfile(args.resume):
        if args.distributed:
            if rank == 0:
                print('resume from [%s]' % args.resume)
            checkpoint = torch.load(args.resume,
                                    map_location='cuda:%d' %
                                    torch.cuda.current_device())
        else:
            print('resume from [%s]' % args.resume)
            checkpoint = torch.load(args.resume, map_location="cpu")

        model.load_state_dict(checkpoint['network'])
        #woptimizer.load_state_dict(checkpoint['optimizer'])
        #lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
        epoch_offset = checkpoint['epoch']
    else:
        epoch_offset = 0

    model.to(device)

    if args.fp16:
        model, w_optim = amp.initialize(model, w_optim, opt_level="O1")

    for epoch in range(epoch_offset, args.epochs):
        if args.distributed:
            if rank == 0:
                model.print_alphas(logger)
        else:
            model.print_alphas(logger)

        # train
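        # On "real validation" epochs the architecture step sees the held-out
        # val_loader; on other epochs the training loader is reused in its place.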
        if epoch % args.real_val_freq == 0:
            train(args,
                  train_loader,
                  val_loader,
                  model,
                  architect,
                  w_optim,
                  alpha_optim,
                  lr_scheduler,
                  epoch=epoch)
        else:
            train(args,
                  train_loader,
                  train_loader,
                  model,
                  architect,
                  w_optim,
                  alpha_optim,
                  lr_scheduler,
                  epoch=epoch)
        # validation
        cur_step = (epoch + 1) * len(train_loader)

        top1 = validate(args,
                        val_loader,
                        model,
                        epoch=epoch,
                        cur_step=cur_step)

        if args.distributed:
            if rank == 0:
                if best_top1 < top1:
                    best_top1 = top1
                    save_network(args, model, epoch, top1, isbest=True)
                else:
                    if epoch % args.forcesave == 0:
                        save_network(args, model, epoch, top1)
                writer.add_scalar('val/best_top1', best_top1, cur_step)

        else:
            if best_top1 < top1:
                best_top1 = top1
                save_network(args, model, epoch, top1, isbest=True)
            else:
                if epoch % args.forcesave == 0:
                    save_network(args, model, epoch, top1)

            writer.add_scalar('val/best_top1', best_top1, cur_step)

        if args.distributed:
            if rank == 0:
                logger.info("Final best Prec@1 = {:.4%}".format(best_top1))
                #logger.info("Best Genotype = {}".format(best_genotype))
        else:
            logger.info("Final best Prec@1 = {:.4%}".format(best_top1))

        genotype = model.genotype()

        if args.distributed:

            if rank == 0:
                logger.info("genotype = {}".format(genotype))

                if args.plot_path:
                    os.makedirs(os.path.join(args.plot_path, args.task_name),
                                exist_ok=True)
                    plot_path = os.path.join(args.plot_path, args.task_name,
                                             "EP{:02d}".format(epoch + 1))
                    caption = "Epoch {}".format(epoch + 1)
                    plot(genotype.normal, plot_path + "-normal", caption)
                    plot(genotype.reduce, plot_path + "-reduce", caption)

                    # add_image expects pixel data, not a file path; assuming
                    # plot() rendered a PNG at plot_path + "-normal.png",
                    # load it here (Pillow assumed available).
                    from PIL import Image
                    img = np.asarray(
                        Image.open(plot_path + '-normal.png').convert('RGB'))
                    writer.add_image('genotype/normal', img, cur_step,
                                     dataformats='HWC')

        else:
            logger.info("genotype = {}".format(genotype))

            if args.plot_path:
                os.makedirs(os.path.join(args.plot_path, args.task_name),
                            exist_ok=True)
                plot_path = os.path.join(args.plot_path, args.task_name,
                                         "EP{:02d}".format(epoch + 1))
                caption = "Epoch {}".format(epoch + 1)
                plot(genotype.normal, plot_path + "-normal", caption)
                plot(genotype.reduce, plot_path + "-reduce", caption)

                # add_image expects pixel data, not a file path; assuming
                # plot() rendered a PNG at plot_path + "-normal.png",
                # load it here (Pillow assumed available).
                from PIL import Image
                img = np.asarray(
                    Image.open(plot_path + '-normal.png').convert('RGB'))
                writer.add_image('genotype/normal', img, cur_step,
                                 dataformats='HWC')