Example #1
    def main():
        Configuration.load()
        logging.info("configuration loaded")
        di.MessageFactory.init_parameters()
        setup_simulation()
        logging.info("simulation runtime built")

        setup_jid = Configuration.parameters['userjid'] + "/setupmodule"
        password = Configuration.parameters['xmpp_password']
        start_disp()
        setupmodule = sm.SetupModule(setup_jid, password)
        setupmodule.start()
Example #2
File: csrf.py Project: Talung/PimpMyBot
def generate_csrf_token():
    """
    Generate and set new CSRF token in cookie. The generated token is set to
    ``request.csrf_token`` attribute for easier access by other functions.
    It is generally not necessary to use this function directly.

    .. warning::
       This function uses ``os.urandom()`` call to obtain 8 random bytes when
       generating the token. It is possible to deplete the randomness pool and
       make the random token predictable.

    From https://github.com/Outernet-Project/bottle-utils/blob/master/bottle_utils/csrf.py
    """
    global SECRET

    if SECRET is None:
        # Avoid circular import
        from utils.config import Configuration
        SECRET = Configuration.get().secret

    sha256 = hashlib.sha256()
    sha256.update(os.urandom(8))
    token = sha256.hexdigest().encode(ENCODING)
    response.set_cookie(CSRF_TOKEN, token, path=ROOT,
                        secret=SECRET, max_age=EXPIRES)
    request.csrf_token = token.decode(ENCODING)
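
A minimal sketch of how the token might be verified on form submission. The decorator below is hypothetical (it is not part of this project) and assumes bottle's request/abort plus the same CSRF_TOKEN, SECRET, and ENCODING constants used above:

from bottle import abort, request

def csrf_protect(handler):
    def wrapper(*args, **kwargs):
        # The cookie was stored signed with SECRET; the form is expected
        # to echo the same token back under the CSRF_TOKEN key.
        cookie_token = request.get_cookie(CSRF_TOKEN, secret=SECRET)
        if isinstance(cookie_token, bytes):
            cookie_token = cookie_token.decode(ENCODING)
        form_token = request.forms.get(CSRF_TOKEN)
        if not cookie_token or cookie_token != form_token:
            abort(403, 'CSRF token mismatch')
        return handler(*args, **kwargs)
    return wrapper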
Example #3
File: modules.py Project: Gagaro/PimpMyBot
def get_apis():
    """ Get all the available methods exposed by activated modules. """
    apis = {"__pmb": {"title": "Pimp My Bot", "api": pmb_api}}
    for module in Configuration.get().get_activated_modules():
        module = module.get_module()
        if module.api:
            apis[module.identifier] = {"title": module.title, "api": module.api}
    return apis
Example #4
    def config(self):
        if self._config is not None:
            return self._config

        from utils.config import Configuration, ModuleConfiguration

        configuration = Configuration.get()
        self._config = ModuleConfiguration.get_or_create(identifier=self.identifier, configuration=configuration)[0]
        return self._config
Example #5
File: client.py Project: Talung/PimpMyBot
    def __init__(self, pipe):
        self.pipe = pipe
        self.config = Configuration.get()
        self.socket = None
        self.handlers = []
        self.add_handler(handle_commands)
        self.sender = Sender(self.config.channel)
        self.modules = {}
        self.load_modules()
Example #6
def construct_model(conf, model_name, **kwargs):
  params = conf.to_param_dict(REQUIRED_PARAMS, OPTIONAL_PARAMS, KEY_RENAMES)
  model_conf = Configuration.from_dict(params['pretrained_model_conf'], conf)
  params['pretrained_model_conf'] = model_conf
  model_conf = Configuration.from_dict(params['learnable_model_conf'], conf)
  params['learnable_model_conf'] = model_conf

  model = RefinementWrapper(**params)
  initialize_pretrained_model(params['pretrained_model_conf'],
                              model.pretrained_model,
                              kwargs['cuda'], conf.file)

  if params.get('freeze_pretrained_model', True):
    # Freeze pretrained model
    for param in model.pretrained_model.parameters():
      param.requires_grad = False

  return model
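
A hedged sketch of the parameter dict RefinementWrapper receives after to_param_dict; the key names come from the code above, but REQUIRED_PARAMS, OPTIONAL_PARAMS, and KEY_RENAMES are not shown, so every value here is invented:

params_sketch = {
    'pretrained_model_conf': {'name': 'pretrained_net'},  # invented model conf
    'learnable_model_conf': {'name': 'refine_net'},       # invented model conf
    'freeze_pretrained_model': True,  # default used by the code above
}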
Example #7
File: csrf.py Project: Gagaro/PimpMyBot
def get_secret():
    """ Get secret """
    global SECRET

    if SECRET is None:
        # Avoid circular import
        from utils.config import Configuration
        SECRET = Configuration.get().secret
    return SECRET
Example #8
def configuration_view_post():
    configuration = Configuration.get()
    configuration.username = request.forms['username']
    configuration.oauth = request.forms['oauth']
    configuration.channel = request.forms['channel']
    configuration.lang = request.forms['lang']
    configuration.save()
    success('Configuration saved')
    return {
        'config': configuration,
        'languages': languages,
    }
Example #9
File: app.py Project: Gagaro/PimpMyBot
    def update_db(self):
        """ Install or upgrade DB if needed. """
        if 'configuration' not in db.get_tables():
            # The database has not been created yet, let's do it.
            from core_modules import install_core_modules

            db.create_tables([
                Configuration, ModuleConfiguration, WidgetConfiguration,
                Command, Action, CommandAction,
            ])
            Configuration.create(
                secret=hashlib.sha256(os.urandom(16)).hexdigest(),
                upgrades=len(upgrades),
            )
            install_core_modules()
        else:
            # Upgrade if needed
            upgrades_done = Configuration.select(Configuration.upgrades).get().upgrades
            if upgrades_done < len(upgrades):
                for upgrade in upgrades[upgrades_done:]:
                    upgrade()
                Configuration.update(upgrades=len(upgrades)).execute()
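
The upgrades list consumed here holds plain callables that are applied in order, starting past the stored upgrade count. A hedged sketch of what one entry could look like (the function name and SQL are invented; the real upgrades live elsewhere in the project):

def upgrade_0001_add_lang_column():
    # Hypothetical schema migration executed through the peewee database
    db.execute_sql('ALTER TABLE configuration ADD COLUMN lang VARCHAR(255)')

upgrades = [upgrade_0001_add_lang_column]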
Example #10
def _get_segmentation_score_metric(conf, metric_name, cuda):
    from metrics.segmentation_score import SegmentationScore
    assert conf.has_attr('segmentation_score_metric'), \
        ('Segmentation score metric needs additional config '
         'under key "segmentation_score_metric"')

    metric_conf = conf.segmentation_score_metric
    model_conf = Configuration.from_dict(metric_conf['model'])
    dice_score_class = metric_conf.get('class')
    save_segmentations_path = metric_conf.get('save_segmentations_path')
    skip_empty_images = metric_conf.get('skip_empty_images', False)

    return SegmentationScore(model_conf, conf.file, cuda, dice_score_class,
                             save_segmentations_path, skip_empty_images)
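
A hedged sketch of the config block the assert above demands, limited to the keys this function reads; the values are invented:

segmentation_score_metric = {
    'model': {'name': 'segmentation_net'},  # fed to Configuration.from_dict
    'class': 1,                             # optional dice score class
    'save_segmentations_path': None,        # optional output directory
    'skip_empty_images': False,             # optional, defaults to False
}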
Example #11
def configuration_view_post():
    configuration = Configuration.get()
    configuration.username = request.forms['username']
    configuration.oauth = request.forms['oauth']
    configuration.channel = request.forms['channel']
    configuration.lang = request.forms['lang']
    configuration.send_as_me = 'send_as_me' in request.forms.keys()
    configuration.save()
    success('Configuration saved')
    # TODO send configuration reload required to IRC client
    return {
        'config': configuration,
        'languages': languages,
    }
Example #12
File: modules.py Project: Gagaro/PimpMyBot
def get_dashboard():
    """ Get all widgets in the correct columns. """
    dashboard = {"deactivated": [], "left": [], "middle": [], "right": []}
    for module in Configuration.get().get_activated_modules():
        for identifier, widget in module.get_module().widgets.items():
            config = WidgetConfiguration.get_or_create(identifier=identifier)[0]
            widget.update({"config": config, "identifier": identifier})
            if config.column in ["left", "middle", "right"]:
                dashboard[config.column].append(widget)
            else:
                dashboard["deactivated"].append(widget)
    for column, widgets_list in dashboard.items():
        dashboard[column] = sorted(widgets_list, key=lambda d: d["config"].order)
    return dashboard
Example #13
    def test_configuration_post(self):
        from utils.config import Configuration

        self.app.post(app.get_url('configuration'), {
            'username': 'USERNAME',
            'oauth': 'OAUTH',
            'channel': 'CHANNEL',
            'lang': 'en_US',
        })

        config = Configuration.get()
        self.assertEqual(config.username, 'USERNAME')
        self.assertEqual(config.oauth, 'OAUTH')
        self.assertEqual(config.channel, 'CHANNEL')
Example #14
File: bottle.py Project: Gagaro/PimpMyBot
    def prepare(self, *args, **kwargs):
        from babel.support import Translations
        from utils.translations import TRANSLATIONS_DIR
        from utils.config import Configuration

        translation = Translations.load(
            TRANSLATIONS_DIR,
            locales=Configuration.get().lang,
            domain='pimpmybot'
        )
        kwargs['filters'] = {
            'datetimeformat': datetimeformat
        }
        super(PmbJinja2Template, self).prepare(*args, **kwargs)
        self.env.install_gettext_translations(translation)
Example #15
File: api.py Project: Talung/PimpMyBot
    def oauth(cls):
        if cls._oauth is None:
            from utils.config import Configuration

            cls._oauth = Configuration.get().oauth
        return cls._oauth
Example #16
def main(argv):
    args = parser.parse_args(argv)

    if args.cuda != '':
        try:
            args.cuda = utils.set_cuda_env(args.cuda)
        except Exception:
            print('No free GPU on this machine. Aborting run.')
            return
        print('Running on GPU {}'.format(args.cuda))

    # Load configuration
    conf = Configuration.from_json(args.config)
    conf.args = args
    if args.conf:
        new_conf_entries = {}
        for arg in args.conf:
            key, value = arg.split('=')
            new_conf_entries[key] = value
        conf.update(new_conf_entries)
    if args.verbose:
        print(conf)

    utils.set_random_seeds(conf.seed)

    # Setup model
    runner = build_runner(conf,
                          conf.runner_type,
                          args.cuda,
                          mode='train',
                          resume=args.resume is not None)

    if args.print_model:
        print(str(runner))

    # Handle resuming from checkpoint
    restore_state = None
    if args.resume:
        if os.path.exists(args.resume):
            restore_state = restore_checkpoint(args.resume, runner)
            conf.run_dir = os.path.dirname(args.resume)
            print('Restored checkpoint from {}'.format(args.resume))
        else:
            print('Checkpoint {} to restore from not found'.format(
                args.resume))
            return

    # Setup log directory
    if args.run_dir:
        conf.run_dir = args.run_dir
    if not conf.has_attr('run_dir'):
        run_name = conf.get_attr('run_name', default='unnamed_run')
        conf.run_dir = get_run_dir(args.log_dir, run_name)
    if not args.dry:
        if not os.path.isdir(conf.run_dir):
            os.mkdir(conf.run_dir)
        print('This run is saved to: {}'.format(conf.run_dir))
        config_path = get_config_path(conf.run_dir)
        conf.serialize(config_path)

    use_tensorboard = conf.get_attr('use_tensorboard',
                                    default=DEFAULT_USE_TENSORBOARD)
    if use_tensorboard and not args.dry:
        from tensorboardX import SummaryWriter
        summary_writer = SummaryWriter(conf.run_dir)
    else:
        summary_writer = None

    # Load datasets
    num_workers = conf.get_attr('num_data_workers',
                                default=DEFAULT_NUM_WORKERS)
    num_train_samples = conf.get_attr('num_train_subset_samples', default=None)
    num_val_samples = conf.get_attr('num_validation_subset_samples',
                                    default=None)

    train_dataset_name = conf.get_attr('train_dataset', alternative='dataset')
    train_dataset = load_dataset(conf, args.data_dir, train_dataset_name,
                                 'train')
    train_sampler = maybe_get_subset_sampler(num_train_samples, train_dataset)
    train_loader = DataLoader(dataset=train_dataset,
                              num_workers=num_workers,
                              batch_size=conf.batch_size,
                              sampler=train_sampler,
                              shuffle=train_sampler is None)

    val_dataset_name = conf.get_attr('validation_dataset',
                                     alternative='dataset')
    val_dataset = load_dataset(conf, args.data_dir, val_dataset_name, 'val')
    val_sampler = maybe_get_subset_sampler(num_val_samples, val_dataset)
    val_loader = DataLoader(dataset=val_dataset,
                            num_workers=num_workers,
                            batch_size=conf.get_attr('validation_batch_size',
                                                     default=conf.batch_size),
                            sampler=val_sampler,
                            shuffle=False)

    chkpt_metrics = conf.get_attr('validation_checkpoint_metrics', default=[])
    chkpt_metric_dirs = {
        metric: os.path.join(conf.run_dir, 'best_' + metric)
        for metric in chkpt_metrics
    }
    for metric_dir in chkpt_metric_dirs.values():
        if not args.dry and not os.path.isdir(metric_dir):
            os.mkdir(metric_dir)

    # Train
    try:
        train_net(conf, runner, train_loader, val_loader, args.cuda,
                  chkpt_metric_dirs, restore_state, summary_writer)
    except KeyboardInterrupt:
        if summary_writer is not None:
            summary_writer.close()
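
The --conf handling above splits each extra argument on '=' and merges the result into the loaded configuration. A hedged usage sketch; the script name and flag spellings are assumptions inferred from the argparse attributes used in the function:

# python train.py --config conf/example.json --cuda 0 \
#     --conf batch_size=16 num_epochs=50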
Example #17
from __future__ import absolute_import, unicode_literals

import os

from babel import Locale
from babel.support import Translations
from peewee import OperationalError

from utils.config import Configuration

TRANSLATIONS_DIR = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'locales')

try:
    locale = Configuration.get().lang
except OperationalError:
    locale = 'en_US'

translation = Translations.load(
    TRANSLATIONS_DIR,
    locales=locale,
    domain='pimpmybot'
)

_ = translation.gettext


languages = [
    Locale('en', 'US'),
    Locale('fr', 'FR')
]
Example #18

import sys
from os.path import dirname, abspath

sys.path.append(dirname(dirname(abspath(__file__))))
from agents.DDQL_with_prioritized_replay import Prioritized_DDQL
import numpy as np
from environments.IPP_Ypacarai import DiscreteIPP
from utils.config import Configuration
from torch.cuda import is_available as check_if_cuda_available
from torch.cuda import get_device_name
from utils.logging_utils import *
import signal

keep_going = True

console = create_console()
console.clear()
""" Create configure structure """
config = Configuration()
""" Configure seed """
config.seed = 0
""" Create scenario """
my_map = np.genfromtxt(sys.path[0] + '/example_map.csv', delimiter=',')
config.environment = DiscreteIPP(domain_type='Discrete',
                                 scenario_map=my_map,
                                 number_of_features=100,
                                 detection_area_ratio=2)

config.state_size = config.environment.reset().shape
config.action_size = config.environment.action_space.n

# Configure device
if check_if_cuda_available():
    config.device = 'cuda:0'
Example #19
File: modules.py Project: Gagaro/PimpMyBot
def get_menu():
    """ Get the additional menu items. """
    menu = []
    for module in Configuration.get().get_activated_modules():
        menu.extend(module.get_module().menus)
    return menu
Example #20
    basejid = Configuration.parameters['userjid']
    simulation_dir = Configuration.parameters['current_sim_dir']
    password = Configuration.parameters['xmpp_password']
    external = es.ExternalSourceAgent(
        basejid + "/externalSource", password,
        simulation_dir + "/xml/buildingNeighborhood.xml",
        simulation_dir + "/xml/buildingLoad.xml")
    logging.debug(simulation_dir + "/xml/buildingNeighborhood.xml")
    external.simulation_setup()
    adaptor()


if __name__ == "__main__":

    Configuration.load()
    logging.info("configuration loaded")
    di.MessageFactory.init_parameters()
    setup_simulation()
    logging.info("simulation runtime built")

    setup_jid = Configuration.parameters['userjid'] + "/setupmodule"
    password = Configuration.parameters['xmpp_password']
    start_disp()
    setupmodule = sm.SetupModule(setup_jid, password)
    setupmodule.start()

    logging.info("waiting for termination")
    while True:
        try:
            time.sleep(5)
        except KeyboardInterrupt:
            # assumed handler; the snippet is truncated after time.sleep(5)
            break
Example #21

def build_runner(conf, cuda, mode):
    gen_model_conf = Configuration.from_dict(conf.generator_model, conf)
    gen_model = construct_model(gen_model_conf, gen_model_conf.name, cuda)

    val_metric_fns = {
        name: get_metric_fn(conf, name, cuda, 'test')
        for name in conf.get_attr('validation_metrics', default=[])
    }
    output_transform = get_output_transform(conf, conf.application,
                                            'inference')
    test_input_batch_transform = get_input_batch_transform(
        conf, conf.application, 'test')

    if mode == 'train':
        disc_model_conf = Configuration.from_dict(conf.discriminator_model,
                                                  conf)
        disc_model = construct_model(disc_model_conf, disc_model_conf.name,
                                     cuda)

        gen_adv_criteria = {
            loss_name: get_criterion(conf, loss_name, cuda, loss_type='gen')
            for loss_name in conf.generator_adversarial_losses
        }
        gen_criteria = {
            loss_name: get_criterion(conf, loss_name, cuda)
            for loss_name in conf.generator_losses
        }
        disc_adv_criteria = {
            loss_name: get_criterion(conf, loss_name, cuda, loss_type='disc')
            for loss_name in conf.discriminator_losses
        }

        if cuda != '':
            # Potentially split models over GPUs
            gen_model, disc_model = utils.cudaify([gen_model, disc_model],
                                                  cuda)
            utils.cudaify(
                list(gen_adv_criteria.values()) + list(gen_criteria.values()) +
                list(disc_adv_criteria.values()))

        # Important: construct optimizers after moving model to GPU!
        gen_opt_conf = Configuration.from_dict(conf.generator_optimizer, conf)
        gen_optimizer = get_optimizer(gen_opt_conf, gen_opt_conf.name,
                                      gen_model.parameters())
        gen_lr_scheduler = None
        if gen_opt_conf.has_attr('lr_scheduler'):
            gen_lr_scheduler = get_lr_scheduler(gen_opt_conf,
                                                gen_opt_conf.lr_scheduler,
                                                gen_optimizer)

        disc_opt_conf = Configuration.from_dict(conf.discriminator_optimizer,
                                                conf)
        disc_optimizer = get_optimizer(disc_opt_conf, disc_opt_conf.name,
                                       disc_model.parameters())
        disc_lr_scheduler = None
        if disc_opt_conf.has_attr('lr_scheduler'):
            disc_lr_scheduler = get_lr_scheduler(disc_opt_conf,
                                                 disc_opt_conf.lr_scheduler,
                                                 disc_optimizer)

        train_input_batch_transform = get_input_batch_transform(
            conf, conf.application, 'train')
        train_disc_metrics = conf.get_attr('train_discriminator_metrics',
                                           default=[])
        train_disc_metric_fns = {
            name: get_metric_fn(conf, name, cuda, 'train')
            for name in train_disc_metrics
        }
        val_disc_metric_key = 'validation_discriminator_metrics'
        val_disc_metric_fns = {
            name: get_metric_fn(conf, name, cuda, 'test')
            for name in conf.get_attr(val_disc_metric_key, default=[])
        }

        train_gen_metrics = conf.get_attr('train_generator_metrics',
                                          default=[])
        train_gen_metric_fns = {
            name: get_metric_fn(conf, name, cuda, 'train')
            for name in train_gen_metrics
        }

        disc_input_fn = get_discriminator_input_fn(conf, disc_model_conf)
        val_disc_input_fn = get_discriminator_input_fn(conf,
                                                       disc_model_conf,
                                                       no_pool=True)

        pretr_generator_epochs = conf.get_attr('pretrain_generator_epochs')
        pretr_discriminator_epochs = conf.get_attr(
            'pretrain_discriminator_epochs')

        runner = AdversarialRunner(
            gen_model, disc_model, gen_optimizer, disc_optimizer,
            gen_lr_scheduler, disc_lr_scheduler, gen_adv_criteria,
            gen_criteria, disc_adv_criteria,
            conf.get_attr('generator_loss_weights', {}),
            conf.get_attr('discriminator_loss_weights', {}), cuda,
            train_gen_metric_fns, train_disc_metric_fns, val_metric_fns,
            val_disc_metric_fns, output_transform, train_input_batch_transform,
            test_input_batch_transform,
            gen_opt_conf.get_attr('updates_per_step', 1),
            disc_opt_conf.get_attr('updates_per_step',
                                   1), disc_input_fn, val_disc_input_fn,
            pretr_generator_epochs, pretr_discriminator_epochs)
        if gen_model_conf.has_attr('pretrained_weights'):
            initialize_pretrained_model(gen_model_conf, runner.gen, cuda,
                                        conf.file)

        if disc_model_conf.has_attr('pretrained_weights'):
            initialize_pretrained_model(disc_model_conf, runner.disc, cuda,
                                        conf.file)
    else:
        if cuda != '':
            utils.cudaify(gen_model)
        runner = AdversarialRunner(
            gen_model,
            cuda=cuda,
            val_metric_fns=val_metric_fns,
            output_transform=output_transform,
            test_input_batch_transform=test_input_batch_transform)

    return runner
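
A hedged sketch of the conf entries this build_runner reads in train mode; the key names come from the attribute accesses above, and every value is invented:

conf_sketch = {
    'generator_model': {'name': 'generator'},
    'discriminator_model': {'name': 'discriminator'},
    'generator_adversarial_losses': ['gan'],
    'generator_losses': ['l1'],
    'discriminator_losses': ['gan'],
    'generator_optimizer': {'name': 'adam'},
    'discriminator_optimizer': {'name': 'adam'},
}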
Example #22
def main(argv):
    args = parser.parse_args(argv)

    # Load configuration
    conf = Configuration.from_json(args.config)
    conf.args = args
    if args.conf:
        new_conf_entries = {}
        for arg in args.conf:
            key, value = arg.split('=')
            new_conf_entries[key] = value
        conf.update(new_conf_entries)

    # Setup log directory
    if args.run_dir:
        conf.run_dir = args.run_dir
    elif args.resume:
        if os.path.exists(args.resume):
            conf.run_dir = os.path.dirname(args.resume)
    if not conf.has_attr('run_dir'):
        run_name = conf.get_attr('run_name', default='unnamed_run')
        conf.run_dir = get_run_dir(args.log_dir, run_name)
    if not args.dry:
        if not os.path.isdir(conf.run_dir):
            os.mkdir(conf.run_dir)

    setup_logging(conf.run_dir, 'train', args.verbose, args.dry)

    logging.info('Commandline arguments: {}'.format(' '.join(argv)))

    if not args.dry:
        logging.info('This run is saved to: {}'.format(conf.run_dir))
        config_path = get_config_path(conf.run_dir)
        conf.serialize(config_path)

    if args.cuda != '':
        try:
            args.cuda = utils.set_cuda_env(args.cuda)
        except Exception:
            logging.critical('No free GPU on this machine. Aborting run.')
            return
        logging.info('Running on GPU {}'.format(args.cuda))

    if args.verbose:
        logging.debug(str(conf))

    utils.set_random_seeds(conf.seed)

    # Setup model
    logging.info('Setting up training runner {}'.format(conf.runner_type))
    runner = build_runner(conf, conf.runner_type, args.cuda, mode='train')

    if args.print_model:
        print(str(runner))

    if args.print_parameters:
        print_model_parameters(runner)

    # Handle resuming from checkpoint
    restore_state = None
    if args.resume:
        if os.path.exists(args.resume):
            restore_state = restore_checkpoint(args.resume, runner)
            logging.info('Restored checkpoint from {}'.format(args.resume))
        else:
            logging.critical(('Checkpoint {} to restore '
                              'from not found').format(args.resume))
            return

    use_tensorboard = conf.get_attr('use_tensorboard',
                                    default=DEFAULT_USE_TENSORBOARD)
    if use_tensorboard and not args.dry:
        from tensorboardX import SummaryWriter
        summary_writer = SummaryWriter(conf.run_dir)
        logging.debug('Using tensorboardX summary writer')
    else:
        summary_writer = None

    # Load datasets
    num_workers = conf.get_attr('num_data_workers',
                                default=DEFAULT_NUM_WORKERS)
    num_train_samples = conf.get_attr('num_train_subset_samples', default=None)
    num_val_samples = conf.get_attr('num_validation_subset_samples',
                                    default=None)

    train_dataset_name = conf.get_attr('train_dataset', alternative='dataset')
    logging.info('Loading training dataset {}'.format(train_dataset_name))
    train_dataset = load_dataset(conf, args.data_dir, train_dataset_name,
                                 'train')
    train_sampler = maybe_get_subset_sampler(num_train_samples, train_dataset)
    train_loader = DataLoader(dataset=train_dataset,
                              num_workers=num_workers,
                              batch_size=conf.batch_size,
                              sampler=train_sampler,
                              shuffle=train_sampler is None,
                              worker_init_fn=utils.set_worker_seeds)

    val_dataset_name = conf.get_attr('validation_dataset',
                                     alternative='dataset')
    logging.info('Loading validation dataset {}'.format(val_dataset_name))
    val_dataset = load_dataset(conf, args.data_dir, val_dataset_name, 'val')
    val_sampler = maybe_get_subset_sampler(num_val_samples, val_dataset)
    val_loader = DataLoader(dataset=val_dataset,
                            num_workers=num_workers,
                            batch_size=conf.get_attr('validation_batch_size',
                                                     default=conf.batch_size),
                            sampler=val_sampler,
                            shuffle=False,
                            worker_init_fn=utils.set_worker_seeds)

    # Setup validation checkpoints
    chkpt_metrics = conf.get_attr('validation_checkpoint_metrics', default=[])
    chkpt_metric_dirs = {
        metric: os.path.join(conf.run_dir, 'best_' + metric)
        for metric in chkpt_metrics
    }
    for metric_dir in chkpt_metric_dirs.values():
        if not args.dry and not os.path.isdir(metric_dir):
            os.mkdir(metric_dir)

    # Setup early stopping
    if conf.has_attr('early_stopping'):
        from training.early_stopping import EarlyStopper
        early_stoppers = [
            EarlyStopper(conf.early_stopping['metric_name'],
                         conf.early_stopping['patience'],
                         conf.early_stopping.get('min_value', None),
                         conf.early_stopping.get('max_difference', None))
        ]
    elif conf.has_attr('early_stoppers'):
        from training.early_stopping import EarlyStopper
        early_stoppers = []
        for early_stopping_conf in conf.early_stoppers:
            min_value = early_stopping_conf.get('min_value', None)
            max_diff = early_stopping_conf.get('max_difference', None)
            early_stoppers.append(
                EarlyStopper(early_stopping_conf['metric_name'],
                             early_stopping_conf['patience'], min_value,
                             max_diff))
    else:
        early_stoppers = []

    logging.info('Starting training run of {} epochs'.format(conf.num_epochs))

    # Train
    try:
        train_net(conf, runner, train_loader, val_loader, args.cuda,
                  chkpt_metric_dirs, restore_state, summary_writer,
                  early_stoppers)
    except KeyboardInterrupt:
        if summary_writer is not None:
            summary_writer.close()
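
A hedged sketch of the two early-stopping layouts this function accepts, a single 'early_stopping' dict or an 'early_stoppers' list; the metric names and numbers are invented:

early_stopping = {'metric_name': 'val_loss', 'patience': 10,
                  'min_value': None, 'max_difference': None}

early_stoppers = [
    {'metric_name': 'val_loss', 'patience': 10},
    {'metric_name': 'psnr', 'patience': 5, 'max_difference': 0.1},
]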
Example #23

        [input_transform, Lambda(lambda x: x[::downscale, ::downscale])],
        [_to_torch_tensor(), _to_torch_tensor()]
    ])

    return transform


if __name__ == '__main__':
    # Run from main directory with python -m data.reconstruction.rec_seg_transforms
    import matplotlib.pyplot as plt
    from torch.utils.data import DataLoader

    from data.reconstruction.scar_seg import (get_train_set, get_val_set)
    from utils.config import Configuration

    conf = Configuration()
    conf.input_mode = '2d'
    conf.dataset_mode = 'all'
    conf.downscale = 1
    conf.undersampling = {
        'sampling_scheme': "varden",
        'acceleration_factor': 8,
        'variable_acceleration': False
    }

    # TRAINING
    train_set = get_train_set(conf, '../../data')
    loader = DataLoader(dataset=train_set, num_workers=1,
                        batch_size=2, shuffle=True)

    # apply the transform
Example #24
def build_runner(conf, cuda, mode='train', resume=False):
    model_conf = Configuration.from_dict(conf.model)

    model = construct_model(model_conf, model_conf.name)

    val_metric_transform = get_output_transform(conf, conf.application, 'test')
    val_metric_fns = {
        name: get_metric_fn(name)
        for name in conf.get_attr('validation_metrics', default=[])
    }
    output_transform = get_output_transform(conf, conf.application, 'output')

    if mode == 'train':
        criteria = {}
        if conf.has_attr('loss_name'):
            criteria[conf.loss_name] = get_criterion(conf, conf.loss_name,
                                                     cuda)
        else:
            for loss_name in conf.losses:
                criteria[loss_name] = get_criterion(conf, loss_name, cuda)

        assert len(
            criteria) > 0, 'Need at least one loss to optimize something!'

        if cuda != '':
            utils.cudaify([model] + list(criteria.values()))

        # Important: construct optimizer after moving model to GPU!
        opt_conf = Configuration.from_dict(conf.optimizer)
        optimizer = get_optimizer(opt_conf, opt_conf.name, model.parameters())

        lr_scheduler = None
        if opt_conf.has_attr('lr_scheduler'):
            lr_scheduler = get_lr_scheduler(opt_conf, opt_conf.lr_scheduler,
                                            optimizer)

        train_metric_transform = get_output_transform(conf, conf.application,
                                                      'train')
        train_metric_fns = {
            name: get_metric_fn(name)
            for name in conf.get_attr('train_metrics', default=[])
        }

        runner = Runner(model, criteria, conf.get_attr('loss_weights', {}),
                        optimizer, lr_scheduler, cuda, train_metric_fns,
                        train_metric_transform, val_metric_fns,
                        val_metric_transform, output_transform)

        if model_conf.has_attr('pretrained_weights') and not resume:
            runner.initialize_pretrained_model(model_conf, runner.model, cuda,
                                               conf.file)
    else:
        if cuda != '':
            utils.cudaify(model)
        runner = Runner(model,
                        cuda=cuda,
                        val_metric_fns=val_metric_fns,
                        val_metric_transform=val_metric_transform,
                        output_transform=output_transform)

    return runner
Example #25
from utils.config import Configuration

conf = Configuration('app.conf', markdown='main')

PROJECT_NAME = conf.get('GENERIC', 'project_name')
VERSION = conf.get('GENERIC', 'version') or 'Aftermath'
TELEGRAM_BOT_TOKEN = conf.get('TELEGRAM', 'token')

AUTHOR_FULLNAME = conf.get('ABOUT', 'author_fullname')
AUTHOR_TELEGRAM = conf.get('ABOUT', 'author_telegram')
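
A hedged sketch of an app.conf that would satisfy the reads above; the section and option names come from the conf.get() calls, the values are placeholders:

# [GENERIC]
# project_name = MyBot
# version = 1.0
#
# [TELEGRAM]
# token = <telegram bot token>
#
# [ABOUT]
# author_fullname = Jane Doe
# author_telegram = @janedoe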
Example #26

import sys
from os.path import dirname, abspath

sys.path.append(dirname(dirname(abspath(__file__))))
from agents.Soft_AC import Soft_Actors_Critic
import numpy as np
from environments.IPP_Ypacarai import DiscreteIPP
from utils.config import Configuration
from torch.cuda import is_available as check_if_cuda_available
from torch.cuda import get_device_name
from utils.logging_utils import *
import signal

keep_going = True

console = create_console()
console.clear()
""" Create configure structure """
config = Configuration()
""" Configure seed """
config.seed = 0
""" Create scenario """
my_map = np.genfromtxt(sys.path[0] + '/example_map.csv', delimiter=',')
config.environment = DiscreteIPP(domain_type='Continuous',
                                 scenario_map=my_map,
                                 number_of_features=100,
                                 detection_area_ratio=4)

config.state_size = config.environment.reset().shape
config.action_size = config.environment.action_size
""" Configure device """
if check_if_cuda_available():
    config.device = 'cuda:0'
    config.device_name = get_device_name(0)
Example #27
configuration_markup = {
    'config': {
        'ready': False,
    },
    'runtime': {
        'secret_key': None,
        'debug': False,
        'allowed_hosts': 'localhost, 127.0.0.1',
    },
    'application': {
        'api_delay': 0,
    }
}

conf_path = 'conf/project.conf'
conf = Configuration(conf_path, configuration_markup)

BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))

CONF_READY = conf.get_bool('config', 'ready')

if not CONF_READY:
    print(
        '[INFO] Configuration file has been generated but seems to be not yet edited.'
    )
    print('[INFO] Check: {}'.format(os.path.join(PROJECT_ROOT, conf_path)))
    print('[INFO] Set option \'ready\' to \'True\' when you finish setup.')
    print('[INFO] Aborting server startup.')
    exit(0)
Example #28
def get_seg_score_obj(dataset, conf_path, cuda, conf_rel_path):
  from metrics.segmentation_score import SegmentationScore
  conf = Configuration.from_json(conf_path)
  seg_score = SegmentationScore(conf, conf_rel_path, cuda,
                                class_idx=CLASS_IDX, skip_empty_images=True)
  return seg_score
Example #29
File: eval.py Project: liminghao0914/srgan
def main(argv):
  args = parser.parse_args(argv)

  if args.cuda != '':
    try:
      args.cuda = utils.set_cuda_env(args.cuda)
    except Exception:
      print('No free GPU on this machine. Aborting run.')
      return
    print('Running on GPU {}'.format(args.cuda))

  # Load configuration
  conf = Configuration.from_json(args.config)
  conf.args = args
  if args.conf:
    new_conf_entries = {}
    for arg in args.conf:
      key, value = arg.split('=')
      new_conf_entries[key] = value
    conf.update(new_conf_entries)
  if args.verbose:
    print(conf)

  utils.set_random_seeds(conf.seed)

  # Setup model
  runner = build_runner(conf, conf.runner_type, args.cuda, mode='test')

  # Handle resuming from checkpoint
  if args.checkpoint != 'NONE':
    if os.path.exists(args.checkpoint):
      _ = restore_checkpoint(args.checkpoint, runner, cuda=args.cuda)
      print('Restored checkpoint from {}'.format(args.checkpoint))
    else:
      print('Checkpoint {} to restore from not found'.format(args.checkpoint))
      return

  # Evaluate on full image, not crops
  conf.full_image = True

  # Load datasets
  mode = 'dataset'
  if len(args.files_or_dirs) == 0:
    datasets = [load_dataset(conf, args.data_dir, conf.validation_dataset, args.fold)]
  else:
    datasets = []
    for f in args.files_or_dirs:
      if is_dataset(f):
        dataset = load_dataset(conf, args.data_dir, f, args.fold)
        datasets.append(dataset)
      else:
        mode = 'image'
        transform = get_sr_transform(conf, 'test', downscale=False)
        datasets = [make_sr_dataset_from_folder(conf, f, transform,
                                                inference=True)
                    for f in args.files_or_dirs]

  num_workers = conf.get_attr('num_data_workers', default=DEFAULT_NUM_WORKERS)

  # Evaluate all datasets
  for dataset in datasets:
    loader = DataLoader(dataset=dataset,
                        num_workers=num_workers,
                        batch_size=1,
                        shuffle=False)

    if mode == 'dataset':
      data, _, val_metrics = runner.validate(loader, len(loader))

      print('Average metrics for {}'.format(dataset.name))
      for metric_name, metric in val_metrics.items():
        print('     {}: {}'.format(metric_name, metric))
    else:
      data = runner.infer(loader)

    if args.infer or args.dump:
      if mode == 'dataset':
        output_dir = get_run_dir(args.out_dir, dataset.name)
        if not os.path.isdir(output_dir):
          os.mkdir(output_dir)

      file_idx = 0
      for batch in data:
        if mode == 'image':
          output_dir = os.path.dirname(dataset.images[file_idx])

        named_batch = runner.get_named_outputs(batch)
        inputs = named_batch['input']
        predictions = named_batch['prediction']
        targets = named_batch['target']
        for (inp, target, prediction) in zip(inputs, targets, predictions):
          image_file = os.path.basename(dataset.images[file_idx])
          name, _ = os.path.splitext(image_file)
          file_idx += 1

          if args.dump:
            input_file = os.path.join(output_dir,
                                      '{}_input.png'.format(name))
            save_image(inp.data, input_file)
            target_file = os.path.join(output_dir,
                                       '{}_target.png'.format(name))
            save_image(target.data, target_file)
          pred_file = os.path.join(output_dir,
                                   '{}_pred.png'.format(name))
          save_image(prediction.data, pred_file)
Example #30
def main(argv):
  args = parser.parse_args(argv)

  setup_logging(os.path.dirname(args.checkpoint), 'eval',
                args.verbose, args.dry)

  logging.info('Commandline arguments: {}'.format(' '.join(argv)))

  if args.cuda != '':
    try:
      args.cuda = utils.set_cuda_env(args.cuda)
    except Exception:
      logging.critical('No free GPU on this machine. Aborting run.')
      return
    logging.info('Running on GPU {}'.format(args.cuda))

  # Load configuration
  conf = Configuration.from_json(args.config)
  conf.args = args
  if args.conf:
    new_conf_entries = {}
    for arg in args.conf:
      key, value = arg.split('=')
      new_conf_entries[key] = value
    conf.update(new_conf_entries)

  if args.verbose:
    logging.debug(conf)

  utils.set_random_seeds(conf.seed)

  if args.raw:
    # This is a hack to suppress the output transform when we request raw data
    conf.application = 'none'
    if conf.has_attr('tasks'):
      for name, task in conf.tasks.items():
        if 'application' in task:
          logging.debug(('Changing output transform in task {} '
                         'from {} to none').format(name,
                                                   task['application']))
          task['application'] = 'none'

  # Setup model
  runner = build_runner(conf, conf.runner_type, args.cuda, mode='test')

  # Handle resuming from checkpoint
  if args.checkpoint != 'NONE':
    if os.path.exists(args.checkpoint):
      _ = restore_checkpoint(args.checkpoint, runner, cuda=args.cuda)
      logging.info('Restored checkpoint from {}'.format(args.checkpoint))
    else:
      logging.critical(('Checkpoint {} to restore '
                       'from not found').format(args.checkpoint))
      return

  # Load datasets
  mode = 'dataset'
  if len(args.files_or_dirs) == 0:
    datasets = [load_dataset(conf, args.data_dir,
                             conf.validation_dataset, args.fold)]
  else:
    datasets = []
    for f in args.files_or_dirs:
      if is_dataset(f):
        dataset = load_dataset(conf, args.data_dir, f, args.fold)
        datasets.append(dataset)

  if args.raw:
    mode = 'raw'

  num_samples = conf.get_attr('num_validation_subset_samples',
                              default=None)

  # Evaluate all datasets
  for dataset in datasets:
    logging.info('Evaluating dataset {}'.format(dataset.name))

    sampler = maybe_get_subset_sampler(num_samples, dataset)
    loader = DataLoader(dataset=dataset,
                        num_workers=DEFAULT_NUM_WORKERS,
                        batch_size=1,
                        sampler=sampler,
                        shuffle=False)

    if mode == 'dataset':
      data, _, val_metrics = runner.validate(loader, len(loader))

      res_str = 'Average metrics for {}\n'.format(dataset.name)
      for metric_name, metric in val_metrics.items():
        res_str += '     {}: {}\n'.format(metric_name, metric)
      logging.info(res_str)
    else:
      data = runner.infer(loader)

    if not args.dry and (args.infer or args.dump):
      if mode == 'dataset' or mode == 'raw':
        conf_name = os.path.splitext(os.path.basename(conf.file))[0]
        output_dir = get_run_dir(args.out_dir, '{}_{}'.format(dataset.name,
                                                              conf_name))
        if not os.path.isdir(output_dir):
          os.mkdir(output_dir)

      logging.info('Writing images to {}'.format(output_dir))

      file_idx = 0
      for batch in data:
        if mode == 'image':
          output_dir = os.path.dirname(dataset.images[file_idx])

        named_batch = runner.get_named_outputs(batch)
        inp = named_batch['input']

        if 'prediction' in named_batch:
          batch_size = named_batch['prediction'].shape[0]
          filenames = [dataset.get_filename(idx)
                       for idx in range(file_idx, file_idx + batch_size)]
          save_output_images(dataset, inp, named_batch['prediction'],
                             named_batch['target'], output_dir,
                             filenames, 'default', args.dump, args.raw)

        file_idx += len(filenames)

      logging.info(('Finished writing images for '
                   'dataset {}').format(dataset.name))
Example #31
def build_runner(conf, cuda, mode, resume=False):
  gen_model_conf = Configuration.from_dict(conf.generator_model)
  gen_model = construct_model(gen_model_conf, gen_model_conf.name)

  val_metric_transform = get_output_transform(conf, conf.application, 'test')
  val_metric_fns = {name: get_metric_fn(name)
                    for name in conf.get_attr('validation_metrics',
                                              default=[])}
  output_transform = get_output_transform(conf, conf.application, 'output')

  if mode == 'train':
    disc_model_conf = Configuration.from_dict(conf.discriminator_model)
    disc_model = construct_model(disc_model_conf, disc_model_conf.name)

    gen_adv_criteria = {loss_name: get_criterion(conf, loss_name, cuda, 'gen')
                        for loss_name in conf.generator_adversarial_losses}
    gen_criteria = {loss_name: get_criterion(conf, loss_name, cuda)
                    for loss_name in conf.generator_losses}
    disc_adv_criteria = {loss_name: get_criterion(conf, loss_name, cuda,
                                                  'disc')
                         for loss_name in conf.discriminator_losses}

    if cuda != '':
      utils.cudaify([gen_model, disc_model] +
                    list(gen_adv_criteria.values()) +
                    list(gen_criteria.values()) +
                    list(disc_adv_criteria.values()))

    # Important: construct optimizers after moving model to GPU!
    gen_opt_conf = Configuration.from_dict(conf.generator_optimizer)
    gen_optimizer = get_optimizer(gen_opt_conf, gen_opt_conf.name,
                                  gen_model.parameters())
    gen_lr_scheduler = None
    if gen_opt_conf.has_attr('lr_scheduler'):
      gen_lr_scheduler = get_lr_scheduler(gen_opt_conf,
                                          gen_opt_conf.lr_scheduler,
                                          gen_optimizer)

    disc_opt_conf = Configuration.from_dict(conf.discriminator_optimizer)
    disc_optimizer = get_optimizer(disc_opt_conf, disc_opt_conf.name,
                                   disc_model.parameters())
    disc_lr_scheduler = None
    if disc_opt_conf.has_attr('lr_scheduler'):
      disc_lr_scheduler = get_lr_scheduler(disc_opt_conf,
                                           disc_opt_conf.lr_scheduler,
                                           disc_optimizer)

    train_disc_metrics = conf.get_attr('train_discriminator_metrics',
                                       default=[])
    train_disc_metric_fns = {name: get_metric_fn(name)
                             for name in train_disc_metrics}

    train_gen_metric_transform = get_output_transform(conf, conf.application,
                                                      'train')
    train_gen_metrics = conf.get_attr('train_generator_metrics', default=[])
    train_gen_metric_fns = {name: get_metric_fn(name)
                            for name in train_gen_metrics}

    input_method = disc_model_conf.get_attr('input_method',
                                            default=DEFAULT_INPUT_METHOD)

    runner = AdversarialRunner(gen_model, disc_model,
                               gen_optimizer, disc_optimizer,
                               gen_lr_scheduler, disc_lr_scheduler,
                               gen_adv_criteria, gen_criteria,
                               disc_adv_criteria,
                               conf.get_attr('generator_loss_weights', {}),
                               conf.get_attr('discriminator_loss_weights', {}),
                               cuda,
                               train_gen_metric_fns,
                               train_gen_metric_transform,
                               train_disc_metric_fns,
                               val_metric_fns,
                               val_metric_transform,
                               output_transform,
                               input_method)
    if gen_model_conf.has_attr('pretrained_weights') and not resume:
      runner.initialize_pretrained_model(gen_model_conf, runner.gen,
                                         cuda, conf.file)

    if disc_model_conf.has_attr('pretrained_weights') and not resume:
      runner.initialize_pretrained_model(disc_model_conf, runner.disc,
                                         cuda, conf.file)
  else:
    if cuda != '':
      utils.cudaify(gen_model)
    runner = AdversarialRunner(gen_model,
                               cuda=cuda,
                               val_metric_fns=val_metric_fns,
                               val_metric_transform=val_metric_transform,
                               output_transform=output_transform)

  return runner
Example #32
def configuration_view():
    return {
        'config': Configuration.get(),
        'languages': languages,
    }
Example #33

        pred = torch.clamp(complex_abs(pred), min=0.0, max=1.0)
        target = torch.clamp(complex_abs(target), min=0.0, max=1.0)
        return pred, target

    return transform


if __name__ == '__main__':
    # Run from main directory with python -m data.reconstruction.rec_transforms
    import matplotlib.pyplot as plt
    from torch.utils.data import DataLoader

    from data.reconstruction.scar_seg import (get_train_set, get_val_set)
    from utils.config import Configuration

    conf = Configuration()
    conf.input_mode = '2d'
    conf.dataset_mode = 'reconstruction'
    conf.downscale = 1
    conf.undersampling = {
        'sampling_scheme': "varden",
        'acceleration_factor': 8,
        'variable_acceleration': False
    }
    conf.augmentation = {
        #'elastic_transform_sigma': 30,
        #'elastic_transform_alpha': 1000,
        'shift': (0, 10),
        'rotate': 10,
        'scale': (0.9, 1.1)
    }
Example #34
import pypeliner.workflow
import pypeliner.app
import pypeliner.managed

import sys
import os
import shutil

from interface.tenxanalysis import TenxAnalysis
from utils.isabl import TenxDataStorage
from interface.qualitycontrol import QualityControl

from utils.config import Configuration, write_config

config = Configuration()


def Run(sampleid, species, umi_plot, mito_plot, ribo_plot, counts_plot,
        raw_sce):
    print("Running QC.")
    tenx = TenxDataStorage(sampleid)
    tenx_analysis = TenxAnalysis(tenx.tenx_path)
    tenx_analysis.load()
    tenx_analysis.extract()
    qc = QualityControl(tenx_analysis, sampleid)
    qc.run(mito=config.mito)
    plots = qc.plots
    umi = os.path.join(plots, "umi.png")
    mito = os.path.join(plots, "mito.png")
    ribo = os.path.join(plots, "ribo.png")
    counts = os.path.join(plots, "counts.png")
Example #35
import os

from utils.config import Configuration

conf = Configuration('project.conf', markdown='main')

BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))

SECRET_KEY = conf.get('PREDEFINED', 'secret_key')

DEBUG = conf.get('PREDEFINED', 'debug', 'bool')

ALLOWED_HOSTS = conf.get('DEPLOY', 'allowed_hosts', 'csv')

EMAIL_ENABLED = conf.get('EMAIL', 'enabled', 'bool')

if EMAIL_ENABLED:
    EMAIL_USE_TLS = conf.get('EMAIL', 'use_tls', 'bool')
    EMAIL_HOST = conf.get('EMAIL', 'host')
    EMAIL_HOST_USER = conf.get('EMAIL', 'host_user')
    EMAIL_HOST_PASSWORD = conf.get('EMAIL', 'host_password')
    EMAIL_PORT = conf.get('EMAIL', 'port')
    DEFAULT_FROM_EMAIL = conf.get('EMAIL', 'default_from')

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',