Code Example #1
File: confibula.py Project: Anselmatus/confibula
    def init(self):
        print("Loading config from confibula.cfg")
        self.config = config.Config('confibula.cfg')
        self.config.load()
        self.log['environment'] = self.config.getAll()
        self.log['frogs'] = {"males": {}, "females": {}}
        self.log['final'] = {}
        print("Config loaded.")
        print("Starting simulation.")

        logger.init()
        # Pick the first unused log file name: logs/2.log, logs/3.log, ...
        i = 2
        while exists(logger.fileName):
            logger.fileName = 'logs/' + str(i) + '.log'
            i += 1
        logger.console = self.config.getValue("consoleOutput")
        logger.logFile = self.config.getValue("logfileOutput")

        # /!\ ------------ RESERVED to Chou & Antoine ------------ /!\
        # Initialisation of the environment
        self.loadEnvironment()
        self.offsetCamera(breve.vector(0, 0, 22))  # (0, 0, z) depends on the picture's size
        # End of environment

        # Loading frogs
        self.loadMaleFrogs()
        self.movement = breve.createInstances(breve.Movement, 1)
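
Example #1 configures the logger through module-level attributes (fileName, console, logFile). The confibula logger module itself is not part of the snippet; the following is a minimal hypothetical sketch of a module with that shape, not the project's actual code:

# Hypothetical module-style logger consistent with example #1;
# the real confibula logger is not shown.
import os

fileName = 'logs/1.log'   # reassigned by callers, as in example #1
console = True            # echo messages to stdout
logFile = True            # append messages to fileName

def init():
    # Make sure the log directory exists before anything is written.
    os.makedirs('logs', exist_ok=True)

def log(message):
    if console:
        print(message)
    if logFile:
        with open(fileName, 'a') as f:
            f.write(message + '\n')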
Code Example #2
    def run(self):

        workers = []
        log_queue = Queue()

        cache = ConcurrentCache(self.cache_size, Manager())

        # Create pool of workers
        for i in range(self.num_workers):
            w = Worker(i, self.server_socket, log_queue, cache, self.url_fs,
                       self.num_fs)
            workers.append(w)
            w.start()

        # Have the main process ignore the SIGINT signal
        signal.signal(signal.SIGINT, signal.SIG_IGN)

        # Close server connection
        self.server_socket.close()

        logger.init('http-server')

        # Run the log consumer in a dedicated process
        log_process = Process(target=logger.log_worker, args=(log_queue,))
        log_process.start()

        # Wait for workers to finish
        for j in range(self.num_workers):
            workers[j].join()

        # Tell the logger to finish
        log_queue.put(None)
        log_process.join()

        print('Server finished')
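
Examples #2 and #4 share one pattern: every worker pushes records onto a shared multiprocessing Queue, a single dedicated process drains it, and a None sentinel shuts it down. The project's logger module is not shown; this is a minimal hypothetical sketch of a compatible log_worker, with the record format assumed to be plain strings:

# Hypothetical sketch of the queue-draining log worker used above;
# the real logger.log_worker from the project is not shown.
import logging

def log_worker(log_queue):
    # Assumes logging has already been configured, e.g. by logger.init(...).
    log = logging.getLogger('log-worker')
    while True:
        record = log_queue.get()   # blocks until a record arrives
        if record is None:         # sentinel pushed by the parent process
            break
        log.info(record)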
Code Example #3
File: server.py Project: mymsimple/layout-analysis
def startup(app):
    conf.init_arguments()
    log.init(conf.system_config.log_dir)
    # init_log()
    _logger().debug('Startup mode: %s, child process: %s, parent process: %s, thread: %r', conf.MODE, os.getpid(), os.getppid(), current_thread())

    # Initialize all the (global) variables
    server_utils.init_single(conf.MODE)

    # Register all blueprints
    regist_blueprint(app)
    _logger().info("All routes registered:\n %r", app.url_map)
    _logger().info("System startup complete")
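
regist_blueprint is project code that the snippet does not include. Assuming an ordinary Flask app, a minimal hypothetical sketch could look like this (the blueprint name and route are invented for illustration):

# Hypothetical sketch of regist_blueprint; the layout-analysis
# project's actual blueprints are not shown.
from flask import Blueprint, Flask

layout_bp = Blueprint('layout', __name__, url_prefix='/layout')

@layout_bp.route('/analyze', methods=['POST'])
def analyze():
    return 'ok'

def regist_blueprint(app: Flask):
    # Attach each feature Blueprint; app.url_map then lists every
    # registered route, which is what the snippet above logs.
    app.register_blueprint(layout_bp)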
Code Example #4
    def run(self):

        dispatchers = []

        logger.init('fs-server')

        log_queue = Queue()
        req_queue = Queue()
        res_queues = [Queue() for _ in range(self.num_workers)]

        # Create pool of dispatchers
        for i in range(self.num_workers):
            d = Dispatcher(i, self.server_socket, req_queue, res_queues[i],
                           log_queue)
            dispatchers.append(d)
            d.start()

        # Have the main process ignore the SIGINT signal
        signal.signal(signal.SIGINT, signal.SIG_IGN)

        # Close server connection
        self.server_socket.close()

        # Create Cache and FS controller
        r = RequestExec(req_queue, res_queues, self.cache_size)
        r.start()

        # Create 'logger' process
        lp = Process(target=logger.log_worker, args=(log_queue,))
        lp.start()

        # Wait for dispatchers to finish
        for j in range(self.num_workers):
            dispatchers[j].join()

        # Tell the 'RequestExec' to finish
        req_queue.put(None)
        r.join()

        # Tell the 'logger' to finish
        log_queue.put(None)
        lp.join()

        print('File System server finished')
Code Example #5
File: train.py Project: hpbyte/myanmar-tts
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--base_dir', default=os.path.expanduser('~/mm-tts'))
    parser.add_argument('--input', default='training/train.txt')
    parser.add_argument('--log_dir', default=os.path.expanduser('~/mm-tts'))
    parser.add_argument('--restore_step',
                        type=int,
                        help='Global step to restore from checkpoint.')
    parser.add_argument('--summary_interval',
                        type=int,
                        default=100,
                        help='Steps between running summary ops.')
    parser.add_argument('--checkpoint_interval',
                        type=int,
                        default=1000,
                        help='Steps between writing checkpoints.')
    args = parser.parse_args()

    log_dir = os.path.join(args.log_dir, 'logs-mmspeech')
    os.makedirs(log_dir, exist_ok=True)
    logger.init(os.path.join(log_dir, 'train.log'))

    train(log_dir, args)
Code Example #6
File: run.py Project: idjaw/base_python_skeleton
from utils import logger
import logging

logger.init()
log = logging.getLogger('main')

print('asdf')
log.debug('this works')
log.info('asdfasdfsadf')
log.error('asdfasdf')
print('asdf')
if __name__ == '__main__':
    pass
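
Example #6 is the smallest usage: logger.init() is expected to configure the root logger so that plain logging.getLogger(...) calls work afterwards. The utils/logger.py module is not shown in the snippet; a minimal hypothetical sketch, assuming init() only installs a basic root configuration:

# Hypothetical utils/logger.py consistent with example #6;
# the real module from base_python_skeleton is not shown.
import logging

def init(level=logging.DEBUG):
    # Configure the root logger once; loggers obtained later with
    # logging.getLogger('main') inherit this handler and format.
    logging.basicConfig(
        level=level,
        format='%(asctime)s %(name)s %(levelname)s: %(message)s',
    )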
Code Example #7
        # logger.debug("Loaded images:  %r", images.shape)
        # logger.debug("Loaded batch_cs:%r", batch_cs.shape)
        # logger.debug("Loaded batch_om:%r", batch_om.shape)
        # logger.debug("Loaded batch_lm:%r", batch_lm.shape)
        # logger.debug("[%s] loaded %d data", self.name, len(images))

        return images, [batch_cs, batch_os, batch_lm, labels]

    def __getitem__(self, idx):
        batch_data_list = self.data_list[idx * self.batch_size:(idx + 1) *
                                         self.batch_size]
        images, labels = self.load_image_label(batch_data_list)
        print(images)
        return images, labels


if __name__ == '__main__':
    log.init()
    args = conf.init_args()
    charset = label_utils.get_charset(conf.CHARSET)
    train_sequence = SequenceData(name="Train",
                                  label_dir=args.train_label_dir,
                                  label_file=args.train_label_file,
                                  charsets=charset,
                                  conf=conf,
                                  args=args,
                                  batch_size=args.batch)

    for seq in train_sequence:
        print(seq)
Code Example #8
File: GanTrainer.py Project: saurabheights/MLMI
def train_gan(arguments):
    """ Setup result directory and enable logging to file in it """
    outdir = make_results_dir(arguments)
    logger.init(outdir, logging.INFO)
    logger.info('Arguments:\n{}'.format(pformat(arguments)))
    """ Initialize Tensorboard """
    tensorboard_writer = initialize_tensorboard(outdir)
    """ Set random seed throughout python, pytorch and numpy """
    logger.info('Using Random Seed value as: %d' % arguments['random_seed'])
    torch.manual_seed(
        arguments['random_seed'])  # Set for pytorch, used for cuda as well.
    random.seed(arguments['random_seed'])  # Set for python
    np.random.seed(arguments['random_seed'])  # Set for numpy
    """ Set device - cpu or gpu """
    device = torch.device(
        f"cuda:{arguments['gpu']}" if torch.cuda.is_available() else "cpu")
    logger.info(f'Using device - {device}')
    """ Load Model with weights (if available) """
    G: torch.nn.Module = get_model(
        arguments.get('generator_model_args')).to(device)
    D: torch.nn.Module = get_model(
        arguments.get('discriminator_model_args')).to(device)

    if arguments['mode'] == 'dcgan':
        G.apply(weights_init)
        D.apply(weights_init)
    """ Create optimizer """
    G_optimizer = create_optimizer(G.parameters(),
                                   arguments['generator_optimizer_args'])
    D_optimizer = create_optimizer(D.parameters(),
                                   arguments['discriminator_optimizer_args'])
    """ Create Loss """
    loss = torch.nn.BCELoss().to(device=device)  # GAN
    """ Load parameters for the Dataset """
    dataset: BaseDataset = create_dataset(arguments['dataset_args'],
                                          arguments['train_data_args'],
                                          arguments['val_data_args'])
    """ Generate all callbacks """
    callbacks: List[Callbacks] = generate_callbacks(arguments, dataset, device,
                                                    outdir)

    # """ Create loss function """
    # criterion = create_loss(arguments['loss_args'])
    """ Debug the inputs to model and save graph to tensorboard """
    dataset.debug()

    # Only One model is allowed
    # G_dummy_input = torch.rand(size=(1, arguments['generator_model_args']['model_constructor_args']['latent_dim']))
    # D_dummy_input = (torch.rand(1,
    #                           arguments['dataset_args']['name'].value['channels'],
    #                           32, 32  # *arguments['dataset_args']['name'].value['image_size']  # ToDo Fix this
    #                           ))
    # tensorboard_writer.save_graph('Generator', G, G_dummy_input.to(device))
    # tensorboard_writer.save_graph('Discriminator', D, D_dummy_input.to(device))
    logger.info(G)
    logger.info(D)

    def reset_grad():
        G.zero_grad()
        D.zero_grad()

    batch_size = arguments['train_data_args']['batch_size']
    z_dim = arguments['generator_model_args']['model_constructor_args']['nz']

    generator = infinite_train_gen(dataset.train_dataloader)
    interval_length = 10 if is_debug_mode() else 400
    num_intervals = 1 if is_debug_mode() else int(arguments['num_iterations'] /
                                                  interval_length)

    global_step = 0

    # To allocate memory required for the GPU during training and validation
    run_callbacks(
        callbacks,
        model=(G, D),
        optimizer=(G_optimizer,
                   D_optimizer),  # To Save optimizer dict for retraining.
        mode=CallbackMode.ON_NTH_ITERATION,
        iteration=global_step)
    reset_grad()

    for it in range(num_intervals):

        logger.info(f'Interval {it + 1}/{num_intervals}')

        # Set model in train mode
        G.train()
        D.train()

        t = trange(interval_length)
        for _ in t:
            if arguments['mode'] == 'dcgan':
                D_loss, G_loss = train_gan_iter(D, D_optimizer, G, G_optimizer,
                                                loss, device, generator,
                                                batch_size, reset_grad, z_dim,
                                                tensorboard_writer,
                                                global_step)
            elif arguments['mode'] == 'wgan-wp':
                D_loss, G_loss = train_wgan_iter(D, D_optimizer, G,
                                                 G_optimizer, device,
                                                 generator, batch_size,
                                                 reset_grad, z_dim,
                                                 tensorboard_writer,
                                                 global_step)
            elif arguments['mode'] == 'wgan-noise-adversarial':
                D_loss, G_loss = train_noisy_wgan_iter(
                    D,
                    D_optimizer,
                    G,
                    G_optimizer,
                    device,
                    generator,
                    batch_size,
                    reset_grad,
                    z_dim,
                    tensorboard_writer,
                    global_step,
                    contamination_loss_weight=arguments[
                        'contamination_loss_weight'])

            # Log D_Loss and G_Loss in progress_bar
            t.set_postfix(D_Loss=D_loss.data.cpu().item(),
                          G_Loss=G_loss.data.cpu().item())

            # Save Loss In Tensorboard
            tensorboard_writer.save_scalars(
                f'{arguments["mode"].upper()}_Loss', {
                    'Discriminator' if arguments['mode'] == 'dcgan' else 'Critic':
                    D_loss.data.cpu().item(),
                    'Generator':
                    G_loss.data.cpu().item()
                }, global_step)
            global_step += 1

        print(
            f'Discriminator Loss: {D_loss.data.cpu().item()}, Generator Loss: {G_loss.data.cpu().item()}'
        )

        run_callbacks(
            callbacks,
            model=(G, D),
            optimizer=(G_optimizer,
                       D_optimizer),  # To Save optimizer dict for retraining.
            mode=CallbackMode.ON_NTH_ITERATION,
            iteration=global_step)
        reset_grad()
Code Example #9
    )

    val_data_args = dict(
        batch_size=train_data_args['batch_size'] * 4,
        shuffle=False,
        validate_step_size=1,
    )

    dataset: BaseDataset = create_dataset(dataset_args, train_data_args,
                                          val_data_args)

    eval_model.load_state_dict(
        torch.load(
            './logs/2019-12-22T02:24:08.329024_mode_classification_model_ConvNetSimple_dataset_MNIST_subset_1.0_bs_64_name_Adam_lr_0.001/epoch_0032-model-val_accuracy_99.11754911754912.pth'
        ))
    outdir = './logs/2019-12-22T02:24:08.329024_mode_classification_model_ConvNetSimple_dataset_MNIST_subset_1.0_bs_64_name_Adam_lr_0.001/'
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    logger.init(outdir, logging.INFO)
    start = time.time()
    callback = InceptionScoreCallback(eval_model,
                                      device=device,
                                      dataset=dataset,
                                      mode='classifier',
                                      outdir=outdir)
    logger.info(
        f'Inception Score of real dataset is {callback.compute_inception_score()}'
    )
    end = time.time()
    logger.info(f'Time taken = {end - start}')
Code Example #10
def objective(arguments):
    """
    Main Pipeline for training and cross-validation. ToDo - Testing will be done separately in test.py.
    """
    """ Setup result directory and enable logging to file in it """
    outdir = make_results_dir(arguments)
    logger.init(outdir, logging.INFO)
    logger.info('Arguments:\n{}'.format(pformat(arguments)))
    """ Initialize Tensorboard """
    tensorboard_writer = initialize_tensorboard(outdir)
    """ Set random seed throughout python, pytorch and numpy """
    logger.info('Using Random Seed value as: %d' % arguments['random_seed'])
    torch.manual_seed(
        arguments['random_seed'])  # Set for pytorch, used for cuda as well.
    random.seed(arguments['random_seed'])  # Set for python
    np.random.seed(arguments['random_seed'])  # Set for numpy
    """ Set device - cpu or gpu """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    logger.info(f'Using device - {device}')
    """ Load Model with weights (if available) """
    model: torch.nn.Module = get_model(arguments.get('model_args')).to(device)
    """ Create loss function """
    criterion = create_loss(arguments['loss_args'])
    """ Create optimizer """
    optimizer = create_optimizer(model.parameters(),
                                 arguments['optimizer_args'])
    """ Load parameters for the Dataset """
    dataset: BaseDataset = create_dataset(arguments['dataset_args'],
                                          arguments['train_data_args'],
                                          arguments['val_data_args'])
    """ Generate all callbacks """
    callbacks: List[Callbacks] = generate_callbacks(arguments, dataset, device,
                                                    outdir)
    """ Debug the inputs to model and save graph to tensorboard """
    dataset.debug()
    dummy_input = (torch.rand(
        1,
        arguments['dataset_args']['name'].value['channels'],
        *arguments['dataset_args']['name'].value['image_size'],
    )).to(device)
    tensorboard_writer.save_graph(model, dummy_input)
    """ Pipeline - loop over the dataset multiple times """
    max_validation_accuracy = 0
    itr = 0

    best_model_path = None
    delete_old_models = True

    run_callbacks(callbacks,
                  model=model,
                  optimizer=optimizer,
                  mode=CallbackMode.ON_TRAIN_BEGIN)
    for epoch in range(arguments['nb_epochs']):
        """ Train the model """
        train_data_args = arguments['train_data_args']
        if train_data_args['to_train']:
            train_dataloader = dataset.train_dataloader
            progress_bar = ProgressBar(
                target=len(train_dataloader),
                clear=True,
                description=f"Training {epoch + 1}/{arguments['nb_epochs']}: ")
            loss_running_average = RunningAverage()

            run_callbacks(callbacks,
                          model=model,
                          optimizer=optimizer,
                          mode=CallbackMode.ON_EPOCH_BEGIN,
                          epoch=epoch)
            model.train()
            for i, data in enumerate(train_dataloader, 0):
                # get the inputs
                inputs, labels = data
                inputs = inputs.to(device)
                labels = labels.to(device)

                # zero the parameter gradients
                optimizer.zero_grad()

                # Forward Pass
                outputs = model(inputs)

                classification_loss = criterion(outputs, labels)
                tensorboard_writer.save_scalar('Classification_Loss',
                                               classification_loss.item(), itr)
                classification_loss.backward()
                optimizer.step()

                # Compute running loss. Not exact but efficient.
                running_loss = loss_running_average.add_new_sample(
                    classification_loss.item())
                progress_bar.update(i + 1, [
                    ('current loss', classification_loss.item()),
                    ('running loss', running_loss),
                ])
                tensorboard_writer.save_scalar('Training_Loss',
                                               classification_loss, itr)
                itr += 1

            # Callbacks ON_EPOCH_END should be run only when training is enabled. Thus call here.
            run_callbacks(callbacks,
                          model=model,
                          optimizer=optimizer,
                          mode=CallbackMode.ON_EPOCH_END,
                          epoch=epoch)
        """ Validate the model """
        val_data_args = arguments['val_data_args']
        if val_data_args['validate_step_size'] > 0 and \
                epoch % val_data_args['validate_step_size'] == 0:
            correct, total = 0, 0
            validation_dataloader = dataset.validation_dataloader
            progress_bar = ProgressBar(
                target=len(validation_dataloader),
                clear=True,
                description=f"Validating {epoch + 1}/{arguments['nb_epochs']}: "
            )
            model.eval()
            with torch.no_grad():
                for i, data in enumerate(validation_dataloader, 0):
                    inputs, labels = data
                    inputs = inputs.to(device)
                    labels = labels.to(device)

                    outputs = model(inputs)
                    _, predicted = torch.max(outputs.data, 1)
                    total += labels.size(0)
                    correct += (predicted == labels).sum().item()

                    progress_bar.update(i + 1, [
                        ('Batch Accuracy', 100 * correct / total),
                    ])

            val_accuracy = 100 * correct / total
            tensorboard_writer.save_scalar('Validation_Accuracy', val_accuracy,
                                           itr)
            logger.info(
                f'Accuracy of the network on the {dataset.get_val_dataset_size} validation images: {val_accuracy} %'
            )
            """ Save Model """
            if val_accuracy > max_validation_accuracy:
                if delete_old_models and best_model_path:
                    delete_old_file(best_model_path)
                best_model_path = os.path.join(
                    outdir,
                    f'epoch_{epoch:04}-model-val_accuracy_{val_accuracy}.pth')
                torch.save(model.state_dict(), best_model_path)
                max_validation_accuracy = val_accuracy

        tensorboard_writer.flush()

        # Exit loop if training not needed
        if not train_data_args['to_train']:
            break

    run_callbacks(callbacks,
                  model=model,
                  optimizer=optimizer,
                  mode=CallbackMode.ON_TRAIN_END)

    logger.info('Finished Training')
    close_tensorboard()
    logger.info(f'Max Validation accuracy is {max_validation_accuracy}')
    return max_validation_accuracy  # Return in case you later want to add hyperopt.