Example #1
    async def user_prefix(self, ctx, prefix=None):
        """Sets your custom user prefix!
        Your custom prefix will persist across servers and will be permanent through any crashes/downtime.
        This does not override the global prefix (a!) or any server prefixes. Instead, this is just a prefix you can use in addition to the global/server prefix.

        Setting the prefix to "none" (or any capitalization thereof) instead removes the prefix. If run without a prefix, it instead sends your current custom prefix."""
        if prefix is None:
            if ctx.message.author.id in self.bot.bot_prefixes["user"].keys():
                await ctx.send("**{}** is your custom prefix.".format(
                    self.bot.bot_prefixes["user"][ctx.message.author.id]))
            else:
                await ctx.send("You don't have a custom prefix... yet!")
        else:
            if prefix.lower() == "none":
                self.bot.bot_prefixes["user"].pop(ctx.message.author.id)
            else:
                self.bot.bot_prefixes["user"][ctx.message.author.id] = prefix

            dump_yaml(self.bot.bot_prefixes, "prefixes.yml")

            shutil.copy2(
                "prefixes.yml",
                "drive/My Drive/bot_files/among_us_bot/prefixes.yml",
            )

            await ctx.send("Done! Your custom prefix {}.".format(
                f"is now {prefix}"
                if prefix.lower() != "none" else "has been unset"))
Example #2
    async def delete(self, ctx):
        "Deletes automatically set up categories, channels, etc."
        if ctx.guild.id in self.game_setup:
            deleting_msg = await ctx.send(
                "Deleting auto-set-up categories, channels, and roles...")
            game_setup = self.game_setup[ctx.guild.id]
            del self.game_setup[ctx.guild.id]

            for game_map in game_setup.values():
                if not isinstance(game_map, int):
                    for chan_id in game_map["channels"].values():
                        channel = ctx.guild.get_channel(chan_id)
                        if channel is not None:
                            if channel.category is not None:
                                await channel.category.delete(
                                    reason="Among Us Bot auto-deletion")
                            await channel.delete(
                                reason="Among Us Bot auto-deletion")

            in_game_role = ctx.guild.get_role(int(game_setup["in_game_role"]))
            if in_game_role is not None:
                await in_game_role.delete(reason="Among Us Bot auto-deletion")

            dead_role = ctx.guild.get_role(int(game_setup["dead_role"]))
            if dead_role is not None:
                await dead_role.delete(reason="Among Us Bot auto-deletion")

            dump_yaml(self.game_setup, "game-setup.yml")
            await deleting_msg.edit(content="Done!")
            await asyncio.sleep(10)
            await deleting_msg.delete()
Example #3
    def train(self):
        """Perform training."""
        ## archive code and configs
        if self.archive_code:
            utils.archive_code(os.path.join(self.checkpoint_dir,
                                            'archived_code.zip'),
                               filetypes=['.py', '.yml'])
        utils.dump_yaml(os.path.join(self.checkpoint_dir, 'configs.yml'),
                        self.cfgs)

        ## initialize
        start_epoch = 0
        self.metrics_trace.reset()

        self.model.to_device(self.device)
        self.model.init_optimizers()

        ## resume from checkpoint
        if self.resume:
            start_epoch = self.load_checkpoint(optim=True)

        ## initialize tensorboardX logger
        if self.use_logger:
            from tensorboardX import SummaryWriter
            self.logger = SummaryWriter(
                os.path.join(self.checkpoint_dir, 'logs',
                             datetime.now().strftime("%Y%m%d-%H%M%S")))

            ## cache one batch for visualization
            self.viz_input = next(iter(self.val_loader))

        ## run epochs
        print(
            f"{self.model.model_name}: optimizing to {self.num_epochs} epochs")
        for epoch in range(start_epoch, self.num_epochs):
            self.train_loader, self.val_loader, self.test_loader = self.get_data_loader_func(
                self.cfgs)
            self.train_iter_per_epoch = len(self.train_loader)

            self.current_epoch = epoch
            metrics = self.run_epoch(self.train_loader, epoch)
            self.metrics_trace.append("train", metrics)

            with torch.no_grad():
                metrics = self.run_epoch(self.val_loader,
                                         epoch,
                                         is_validation=True)
                self.metrics_trace.append("val", metrics)

            if (epoch + 1) % self.save_checkpoint_freq == 0:
                self.save_checkpoint(epoch + 1, optim=True)
            self.metrics_trace.plot(
                pdf_path=os.path.join(self.checkpoint_dir, 'metrics.pdf'))
            self.metrics_trace.save(
                os.path.join(self.checkpoint_dir, 'metrics.json'))

        print(f"Training completed after {epoch+1} epochs.")
Example #4
    def _guild_prefix(self, _id, prefix):
        if prefix.lower() == "none":
            self.bot.bot_prefixes["guild"].pop(_id)

        else:
            self.bot.bot_prefixes["guild"][_id] = prefix

        dump_yaml(self.bot.bot_prefixes, "prefixes.yml")

        shutil.copy2("prefixes.yml",
                     "drive/My Drive/bot_files/among_us_bot/prefixes.yml")
Example #5
    async def setup(self, ctx):
        'Automatically set up the game, or edit setup options manually. Only if you have the "manage channels" and "manage roles" permissions, though!'
        if ctx.invoked_subcommand is None:
            setup_msg = await ctx.send(
                "Setting up game categories, channels, and roles...")
            category_overwrites = discord.PermissionOverwrite(
                **{k: v for k, v in discord.Permissions.none()})
            in_game_role = await ctx.guild.create_role(
                name="In-Game", reason="Among Us Bot setup")
            dead_role = await ctx.guild.create_role(
                name="Dead", reason="Among Us Bot setup")
            game_setup = {
                "in_game_role": in_game_role.id,
                "dead_role": dead_role.id,
            }

            for map_name, game_map in self.maps.items():
                category = await ctx.guild.create_category(
                    map_name,
                    overwrites={
                        role: category_overwrites
                        for role in ctx.guild.roles
                    },
                    reason="Among Us Bot setup",
                )
                meeting_chan = await category.create_text_channel(
                    "meeting-chat", reason="Among Us Bot setup")
                room_chans = {"Meeting": meeting_chan.id}

                for room in game_map.keys():
                    room_chan = await category.create_text_channel(
                        "-".join(room.lower().split(" ")),
                        reason="Among Us Bot setup",
                    )
                    room_chans[room] = room_chan.id

                game_setup[map_name] = {
                    "category": category.id,
                    "channels": room_chans,
                }

            self.game_setup[ctx.guild.id] = game_setup

            dump_yaml(self.game_setup, "game-setup.yml")
            await setup_msg.edit(content="Done setting up!")
            await asyncio.sleep(10)
            await setup_msg.delete()
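For reference, the structure that setup stores in self.game_setup[guild_id] and writes to game-setup.yml (and that the delete command in Example #2 later walks, skipping the two integer role IDs) looks roughly like the sketch below; the map and room names and the numeric IDs are placeholders, not values from the source.

# Illustrative shape only; real keys come from self.maps and Discord-assigned IDs.
game_setup = {
    "in_game_role": 111111111111111111,
    "dead_role": 222222222222222222,
    "The Skeld": {                         # one entry per map in self.maps
        "category": 333333333333333333,
        "channels": {
            "Meeting": 444444444444444444,
            "Cafeteria": 555555555555555555,
        },
    },
}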
Example #6
    def create_on_demand_configuration(self, group):
        nodegroup = deepcopy(self.nodegroupDefaults)
        recursive_dict_copy(group['nodegroupOverrides'], nodegroup)
        nodegroup['availabilityZones'] = group['availabilityZones']
        if group['instances']:
            if len(group['instances']) > 1:
                raise Exception("""
                                Cannot create an on-demand nodegroup with more
                                than one instance type!\nGroup causing error:\n
                                """ + dump_yaml(group))
            else:
                nodegroup['instanceType'] = group['instances'][0]
        else:
            raise Exception("No instances in group!\nGroup causing error:\n" +
                            dump_yaml(group))

        return nodegroup
Example #7
    def dump_eksctl_config(self):
        if self.eksctl_config:
            return dump_yaml(self.eksctl_config)
        else:
            print("eksctl configuration not set!", file=sys.stderr)
            print("Trying now...", file=sys.stderr)
            self.create_eksctl_config()
            return self.dump_eksctl_config()
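Examples #6 and #7 call dump_yaml with a single argument and use the return value (embedded in an exception message, or returned to the caller), so in those projects it evidently returns the YAML text instead of writing a file. A minimal sketch of that variant, assuming PyYAML:

import yaml

def dump_yaml(data):
    # Return the YAML serialization as a string rather than writing to disk.
    return yaml.safe_dump(data, default_flow_style=False)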
Example #8
	def _dump_configuration(self):
		"""
		Writes the active configuration to the configuration file.
		"""

		config = {
			'physical_ports': {
				'incoming': self._incoming_port,
				'outgoing': self._outgoing_port,
			},
			'mode': self._mode.name,
			'flow_active_time_secs': self._flow_active_time_secs,
			'time_to_keep_stats_secs': self._time_to_keep_stats_secs,
			'flow_starts_retrieval_interval_secs': self._flow_starts_retrieval_interval_secs,
			'firewall_dpid': self._firewall_dpid,
			'blacklist_rules': [rule.as_dict() for rule in self._blacklist_rules],
			'whitelist_rules': [rule.as_dict() for rule in self._whitelist_rules],
		}

		utils.dump_yaml(self.CONFIG_FILE_PATH, config)
Example #9
    def _create_out_dir(self):
        mkdirne(self.args.save_dir)
        timestamp = datetime.now().strftime('%y%m%d_%H%M')
        tag = self.args.tag

        commit = subprocess.run(
            ['git', 'rev-parse', 'HEAD'],
            stdout=subprocess.PIPE).stdout.decode('utf-8')[:6]
        if commit == '':
            commit = 'nohead'

        out_dir = os.path.join(
            self.args.save_dir, '_'.join(
                (timestamp, commit, self.args.model, tag)))
        mkdirne(out_dir)

        cfg_file = os.path.join(out_dir, 'config.yml')
        dump_yaml(self.args, cfg_file)

        return out_dir
Example #10
    def _dump_configuration(self):
        """
		Writes the active configuration to the configuration file.
		"""

        config = {
            'physical_ports': {
                'incoming': self._incoming_port,
                'outgoing': self._outgoing_port,
            },
            'mode': self._mode.name,
            'flow_active_time_secs': self._flow_active_time_secs,
            'time_to_keep_stats_secs': self._time_to_keep_stats_secs,
            'flow_starts_retrieval_interval_secs':
                self._flow_starts_retrieval_interval_secs,
            'firewall_dpid': self._firewall_dpid,
            'blacklist_rules': [rule.as_dict() for rule in self._blacklist_rules],
            'whitelist_rules': [rule.as_dict() for rule in self._whitelist_rules],
        }

        utils.dump_yaml(self.CONFIG_FILE_PATH, config)
Example #11
def prepare_model(args, vocabs, resume_from=None):
    if resume_from is None:
        resume_from = dict()

    model_path = args.model_path
    if resume_from.get("model_args") is not None:
        temp_path = tempfile.mkstemp()[1]
        utils.dump_yaml(resume_from["model_args"], temp_path)

    torchmodels.register_packages(models)
    mdl_cls = torchmodels.create_model_cls(models.jlu, model_path)
    mdl = mdl_cls(
        hidden_dim=args.hidden_dim,
        word_dim=args.word_dim,
        num_words=len(vocabs[0]),
        num_slots=len(vocabs[1]),
        num_intents=len(vocabs[2])
    )
    mdl.reset_parameters()
    if resume_from.get("model") is not None:
        mdl.load_state_dict(resume_from["model"])
    else:
        embeds.load_embeddings(args, vocabs[0], mdl.embeddings())
    return mdl
Example #12
def _add_instance(server):
    """
    Given a server dict from the layout, merge configuration from the YAML into
    the defaults provided in defaults.py.

    Returns a dict that looks like {label: data}
    """
    try:
        label = server['label']
    except KeyError:
        raise ConfigurationError('Server without a label encountered! '
                                 'Data was:\n'
                                 '{}'.format(utils.dump_yaml(server)))

    # Apply extra config from yaml file
    extra_info = server.get('instance_info', {})
    filtered_info = _filter_info(extra_info)
    instance_data = utils.update_recursive(DEFAULT_INSTANCE, filtered_info)
    log.debug(utils.dump_json(instance_data))

    # Logic for special cases
    az = extra_info.get('availability_zone', 'us-east-1a')
    instance_data['subnet_id'] = \
        '${{terraform_remote_state.subnets.output.{}}}'.format(az)

    instance_data['tags'].update({'label': label})
    instance_data['tags'].update({'id': label + '_${var.run_id}'})

    ebs_data = _get_ebs_block_devices(extra_info)
    log.debug(utils.dump_json(ebs_data))
    instance_data.update(ebs_data)

    provisioners = _get_provisioners(extra_info, label)
    log.debug(utils.dump_json(provisioners))
    instance_data.update(provisioners)

    security_groups = _get_instance_security_groups(extra_info)
    log.debug(utils.dump_json(security_groups))
    instance_data.update(security_groups)

    return {label: instance_data}
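The utils.update_recursive helper used above is not shown either; judging by its call shape (defaults first, overrides second, merged dict returned), it is presumably a recursive dict merge. A plausible sketch, offered as an assumption rather than the project's actual implementation:

from copy import deepcopy

def update_recursive(defaults, overrides):
    # Merge 'overrides' into a copy of 'defaults', descending into nested dicts
    # so that only the overridden leaves are replaced.
    merged = deepcopy(defaults)
    for key, value in overrides.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = update_recursive(merged[key], value)
        else:
            merged[key] = value
    return merged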
Example #14
def view_task(task_id, fmt, dir, ext):
    task_id = int(task_id)
    data = w.get_task(id=task_id)[1]
    uuid = data['uuid']

    with open('.'.join([os.path.join(dir, uuid), ext])) as f:
        notation = f.read()

    if fmt == 'note':
        logger.info('returning note text for {0}'.format(task_id))
        print(notation)
    else:
        logger.info('returning full task information for {0} in {1} format.'.format(task_id, fmt))
        if fmt == 'yaml':
            data['notation'] = '\n' + notation
            output = dump_yaml(data)

        elif fmt == 'json':
            data['notation'] = notation
            output = json.dumps(data, indent=3)

        print(output)
Example #15
import discord, os, asyncio
from discord.ext import commands
from utils import load_yaml, dump_yaml
from checks import is_bot_owner


# Code to run at startup
if not os.path.exists("prefixes.yml"):
    dump_yaml({"global": "a!", "guild": {}, "user": {}}, "prefixes.yml")

# Load prefixes
bot_prefixes = load_yaml("prefixes.yml")
print(bot_prefixes)

config = load_yaml("config.yml")

# Gets possible prefixes to use
async def prefix_callable(bot, message):
    guild = message.guild
    prefix_results = []
    if guild:
        if str(guild.id) in bot_prefixes["guild"].keys():
            prefix_results.append(bot_prefixes["guild"][str(guild.id)])
Example #16
                f'{name}_loss{valid_loss:.5f}_epoch{str(epoch).zfill(3)}.pth'
            )
            torch.save(trainer.weights, path)

        scheduler.step()  # type: ignore

        logger.info(f'EPOCH: [{epoch}/{cfg.num_epochs}]')
        logger.info(
            f'TRAIN LOSS: {train_loss:.8f}, VALID LOSS: {valid_loss:.8f}'
        )
        logger.info(
            f'TRAIN mIoU: {train_iou:.8f}, VALID mIoU: {valid_iou:.8f}'
        )

    cfg_path: str = os.path.join(save_dir, 'config.yml')
    utils.dump_yaml(cfg_path, cfg.todict())

    # Plot metrics
    plt.plot(metrics['train_loss'], label='train')
    plt.plot(metrics['valid_loss'], label='valid')
    plt.title('Loss curve')
    plt.legend()
    plt.savefig(os.path.join(save_dir, 'loss.png'))
    plt.clf()

    plt.plot(metrics['train_iou'], label='train')
    plt.plot(metrics['valid_iou'], label='valid')
    plt.title('mIoU curve')
    plt.legend()
    plt.savefig(os.path.join(save_dir, 'mIoU.png'))
Example #17
    def create_spot_configuration(self, group):
        nodegroup = deepcopy(self.nodegroupDefaults)
        recursive_dict_copy(group['nodegroupOverrides'], nodegroup)
        nodegroup['availabilityZones'] = group['availabilityZones']
        if group['instances']:
            if 'instancesDistribution' not in nodegroup.keys():
                nodegroup['instancesDistribution'] = {}

            instances_distribution = nodegroup['instancesDistribution']
            if 'onDemandBaseCapacity' not in instances_distribution.keys():
                instances_distribution['onDemandBaseCapacity'] = 0
            if 'onDemandPercentageAboveBaseCapacity' not in instances_distribution:
                instances_distribution['onDemandPercentageAboveBaseCapacity'] = 0

            instances_distribution['instanceTypes'] = group['instances']

            # find maximum price among the Spot prices of all instances in this group
            max_prices = []
            for instance in group['instances']:
                max_prices.append(
                    self.region_information[instance]['spot_pricing']['maxPrice'])
            max_price = max(max_prices)
            # set maximum price and over pay by a bit
            instances_distribution['maxPrice'] = max_price * (
                1. + self.config['overPayBy'] / 100)

            # hack to get around spot instances with only 1 instance in its group
            # include the most expensive instance in its family and keep the max price
            # the same so that the expensive one is never scheduled
            if len(group['instances']) == 1:
                family_prices = []
                for instance, instance_information in self.region_information.items():
                    instance_family = instance.split(".")[0]
                    group_family = group['instances'][0].split(".")[0]
                    same_family = instance_family == group_family
                    if same_family:
                        family_prices.append(
                            (instance, self.region_information[instance]
                             ['spot_pricing']['maxPrice']))

                most_expensive_in_family = sorted(family_prices,
                                                  key=lambda x: x[1],
                                                  reverse=True)[0]
                most_expensive_in_family_instance_name = most_expensive_in_family[0]

                instances_distribution['instanceTypes'].append(
                    most_expensive_in_family_instance_name)
                # hack to avoid nodegroup with two of the most expensive instances
                # ...just don't allow that as a spot configuration
                if group['instances'][0] == most_expensive_in_family_instance_name:
                    print(
                        f"WARNING: instance {group['instances'][0]} is the most "
                        "expensive in its family and cannot be in a spot group "
                        "by itself!",
                        file=sys.stderr)
                    return None
        else:
            raise Exception("No instances in group!\nGroup causing error:\n" +
                            dump_yaml(group))

        return nodegroup
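A quick worked example of the maxPrice computation above, using made-up numbers rather than real pricing data:

# Hypothetical spot prices for a two-instance group, with overPayBy = 10 (percent).
max_prices = [0.10, 0.12]
over_pay_by = 10
max_price = max(max_prices)                   # 0.12
bid = max_price * (1. + over_pay_by / 100)
print(round(bid, 4))                          # 0.132: bid slightly above the highest spot price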
Example #18
            train_losses[k].append(v)
        train_losses['loss_sum'].append(sum([v for v in train_loss.values()]))
        for k, v in valid_loss.items():
            valid_losses[k].append(v)
        valid_losses['loss_sum'].append(sum([v for v in valid_loss.values()]))

        logger.info(f'EPOCH: [{epoch + 1}/{cfg.num_epochs}]')
        logger.info('TRAIN_LOSS:')
        logger.info(train_loss)
        logger.info('VALID_LOSS')
        logger.info(valid_loss)

    weights_path: str = os.path.join(save_dir, 'weights', 'weights.pth')
    torch.save(model.state_dict(), weights_path)

    utils.dump_yaml(os.path.join(save_dir, 'config.yml'), cfg.todict())
    # Plot metrics
    keys: Tuple[str, ...] = ('loss_classifier', 'loss_box_reg', 'loss_mask',
                             'loss_objectness', 'loss_rpn_box_reg', 'loss_sum')
    plt.figure(figsize=(12, 8))

    for idx, key in enumerate(keys):
        plt.subplot(2, 3, idx + 1)
        plt.plot(train_losses[key], label='train')
        plt.plot(valid_losses[key], label='valid')
        plt.title(key)
        plt.legend()
    plt.tight_layout()
    plt.savefig(os.path.join(save_dir, 'fig.png'))
    plt.clf()
Example #19
def render_task_list(query, dir, ext, fmt):
    if fmt in ['note', 'yaml']:
        print(dump_yaml(list_tasks(query, dir, ext)))
    elif fmt == 'json':
        for doc in list_tasks(query, dir, ext):
            print(json.dumps(doc, indent=2))
Example #20
try:
    exp_name = utils.make_experiment_name(args.debug)
    result_dir = utils.RESULTS_BASE_DIR / exp_name
    os.mkdir(result_dir)

    logger = mylogger.get_mylogger(filename=result_dir / 'log')
    sandesh.send(f'start: {exp_name}')
    logger.debug(f'created: {result_dir}')
    logger.debug('loading data ...')

    train_feat_path = utils.FEATURE_DIR / 'baseline_features.pkl'
    X = utils.load_pickle(train_feat_path)
    print(X.columns)

    features_list = utils.load_yaml(args.feature)
    utils.dump_yaml(features_list, result_dir / 'features_list.yml')
    all_features = features_list['features']['original'] + \
        features_list['features']['generated']
    categorical_feat = features_list['categorical_features']

    logger.debug(all_features)
    logger.debug(f'features num: {len(all_features)}')
    utils.dump_yaml(features_list, result_dir / 'features_list.yml')

    # X_test = X_test[all_features]

    # sandesh.send(args.config)
    config = utils.load_yaml(args.config)
    logger.debug(config)
    utils.dump_yaml(config, result_dir / 'model_config.yml')
    model_params = config['model_params']
Example #21
def dump_defaults(defaults):
    dump_yaml(DEFAULTS_PATH, defaults)
Example #22
def check_perms(ctx, command_name=None, cog_name=None, suppress=False):
    ret = False

    # inits
    GLOBALS = utils.load_yaml(utils.GLOBAL_PERMS_FILE)
    cmd = ctx.command.name if command_name is None else command_name
    if cog_name is None:
        cog = ctx.cog.qualified_name if ctx.cog is not None else "none"
    else:
        cog = cog_name

    # check global admin
    if _is_global_admin(ctx, GLOBALS):
        ret |= True

    # if dm, check dm perms in global_perms file, else guild perms file
    if ctx.guild is None:
        ret |= _check(cmd=cmd,
                      cog=cog,
                      perm_dict=GLOBALS['dm'],
                      flags=GLOBALS['flags'],
                      ctx=ctx,
                      is_dm=True)
        utils.dump_yaml(GLOBALS, utils.GLOBAL_PERMS_FILE)
    else:
        # check guild owner
        if ctx.author.id == ctx.guild.owner.id:
            ret |= True

        # check guild admin
        member = ctx.guild.get_member(ctx.author.id)
        perms = member.permissions_in(ctx.channel)
        if perms.administrator:
            ret |= True

        # load guild perms
        perms_file = f"{utils.PERMS_DIR}{str(ctx.guild.id)}.yaml"
        if os.path.exists(perms_file):
            perms_dict = utils.load_yaml(perms_file)
        else:
            perms_dict = GLOBALS['default_perms']
            utils.dump_yaml(perms_dict, perms_file)

        # check guild perms
        if not suppress and not ret:
            ret |= _check(cmd=cmd,
                          cog=cog,
                          perm_dict=perms_dict,
                          flags=perms_dict['flags'],
                          ctx=ctx,
                          is_dm=False)
        else:
            try:
                ret |= _check(cmd=cmd,
                              cog=cog,
                              perm_dict=perms_dict,
                              flags=perms_dict['flags'],
                              ctx=ctx,
                              is_dm=False)
            except PermissionError:
                ret |= False

        utils.dump_yaml(perms_dict, perms_file)

    return ret
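The permission files that check_perms loads are not shown in these examples. Judging only from the keys the function reads (GLOBALS['dm'], GLOBALS['flags'], GLOBALS['default_perms'], and a per-guild dict that must at least contain 'flags'), a guild perms file might decode to something like the sketch below; everything beyond those key names is a hypothetical placeholder.

# Hypothetical layout for illustration; only the 'flags' key name comes from the
# code above, the per-cog/per-command structure is assumed.
hypothetical_guild_perms = {
    "flags": {},                                   # options handed to _check() (contents unknown)
    "Game": {"setup": ["role_or_user_id"]},        # assumed per-cog, per-command entries
}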