Esempio n. 1
0
    def download(self, name, path):
        """ Download model denoted by the given name to disk.

        Fetches a ``.tar.gz`` archive from the configured release host,
        verifies its checksum, and extracts it into the given directory.
        The temporary archive file is always removed afterwards.

        :param name: Name of the model to download.
        :param path: Path of the directory to save model into.
        :raise IOError: If the downloaded archive checksum does not match.
        """
        url = '{}/{}/{}/{}/{}.tar.gz'.format(self._host, self._repository,
                                             self.RELEASE_PATH, self._release,
                                             name)
        get_logger().info('Downloading model archive %s', url)
        with requests.get(url, stream=True) as response:
            response.raise_for_status()
            # delete=False: the file must survive the inner `with` so it can be
            # reopened for checksum validation and extraction; it is removed
            # manually in the `finally` clause below.
            archive = NamedTemporaryFile(delete=False)
            try:
                with archive as stream:
                    # Note: check for chunk size parameters ?
                    for chunk in response.iter_content(chunk_size=8192):
                        if chunk:
                            stream.write(chunk)
                get_logger().info('Validating archive checksum')
                if compute_file_checksum(archive.name) != self.checksum(name):
                    raise IOError('Downloaded file is corrupted, please retry')
                get_logger().info('Extracting downloaded %s archive', name)
                with tarfile.open(name=archive.name) as tar:
                    # NOTE(review): extractall() trusts member paths inside the
                    # archive; a malicious archive could write outside `path`
                    # (path traversal). Presumably archives come from a trusted
                    # release source — confirm, or add a member filter.
                    tar.extractall(path=path)
            finally:
                os.unlink(archive.name)
        get_logger().info('%s model file(s) extracted', name)
Esempio n. 2
0
 def __init__(self, config):
     """Build the three sub-generators (text, background, fusion) and load
     each one's pretrained dygraph weights."""
     super(StyleTextRec, self).__init__()
     self.logger = get_logger()
     predictor_cfg = config["Predictor"]
     self.text_generator = TextGenerator(predictor_cfg["text_generator"])
     self.bg_generator = BgGeneratorWithMask(predictor_cfg["bg_generator"])
     self.fusion_generator = FusionGeneratorSimple(
         predictor_cfg["fusion_generator"])
     # Load pretrained weights for each sub-network, background first.
     for module, cfg_key in ((self.bg_generator, "bg_generator"),
                             (self.text_generator, "text_generator"),
                             (self.fusion_generator, "fusion_generator")):
         load_dygraph_pretrain(
             module,
             self.logger,
             path=predictor_cfg[cfg_key]["pretrain"],
             load_static_weights=False)
Esempio n. 3
0
 def __init__(self, config):
     """Corpus generator emitting digit strings (NumberCorpus)."""
     self.logger = get_logger()
     self.logger.info("using NumberCorpus")
     global_cfg = config["Global"]
     self.num_list = "0123456789"
     self.en_char_list = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
     self.height = global_cfg["image_height"]
     self.max_width = global_cfg["image_width"]
Esempio n. 4
0
 def __init__(self, config, tag):
     """Writer state: output location, a tag, and per-label bookkeeping."""
     self.logger = get_logger()
     self.output_dir = config["Global"]["output_dir"]
     self.tag = tag
     # Running counters used while emitting labelled samples.
     self.counter = 0
     self.label_dict = {}
     self.label_file_index = 0
Esempio n. 5
0
 def __init__(self):
     """Initialize request bookkeeping and a logger bound to this view's
     operation name.

     Note: ``self.operation`` must be provided by the subclass, as it is
     read here to tag log entries.
     """
     # Bug fix: `super(BaseAPIView).__init__()` builds an *unbound* super
     # object and never invokes the parent initializer; the two-argument
     # form (or bare `super()`) is required for the chained call to run.
     super(BaseAPIView, self).__init__()
     self.req_data = None
     self.start_time = None
     self.end_time = None
     self.duration = None
     self.logger = get_logger(__name__).bind(
         operation="{0}_view".format(self.operation))
Esempio n. 6
0
def run(args):
    """Run the toy-metric experiment once per seed in 1..50, logging all
    output through the experiment logger."""
    logger = get_logger(args.dataset,
                        os.path.join(root_path, LOG_TOY_METRIC_PATH),
                        __file__)
    print = logger.info

    # Make sure the result directory exists before the runs start.
    makedirs(osp.join(root_path, RESULT_TOY_METRIC_PATH, 'a.py'))
    for seed in range(1, 51):
        _run(args, seed, print)
Esempio n. 7
0
 def __init__(self, config):
     """Text drawer setup: geometry, supported characters, and fonts."""
     self.logger = get_logger()
     global_cfg = config["Global"]
     self.max_width = global_cfg["image_width"]
     self.char_list = " 0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
     self.height = global_cfg["image_height"]
     self.font_dict = {}
     self.load_fonts(config["TextDrawer"]["fonts"])
     # Languages are the keys of the font table populated above.
     self.support_languages = list(self.font_dict)
Esempio n. 8
0
def run(args):
    """Run the regression experiment `args.n_runs` times and dump aggregate
    metrics (mean and standard error of RMSE / log-likelihood) to JSON.

    NOTE(review): the first two summary lines are labelled 'test ...' but
    print `rmse_results` / `lld_results`, while the JSON 'test_rmse' /
    'test_lld' entries use the separate test lists — verify the labels
    match the intended reporting.
    """
    # 'svgp' shares the 'gp' log/result directories.
    method = 'gp' if args.method == 'svgp' else args.method
    log_dir = osp.join(LOG_REG_PATH, '%s_%s' % (args.dataset, method))
    # setup logger
    logger = get_logger(args.dataset, log_dir, __file__)
    print = logger.info

    # set jitter (extra numerical stability for these datasets)
    if args.dataset in ['concrete']:
        gfs.settings.set_jitter(3e-5)
    if args.dataset in ['naval']:
        gfs.settings.set_jitter(1e-4)

    rmse_results, lld_results = [], []
    test_rmse_results, test_lld_results = [], []

    for seed in range(args.init_seed, args.n_runs + args.init_seed):
        rmse, ll, test_rmse, test_lld = _run(args, seed, print)
        rmse_results.append(rmse)
        lld_results.append(ll)
        test_rmse_results.append(test_rmse)
        test_lld_results.append(test_lld)

    print('test rmse = {}'.format(rmse_results))
    print('test log likelihood = {}'.format(lld_results))
    # std / sqrt(n_runs) = standard error of the mean.
    print("Test rmse =                   {}/{}".format(
        np.mean(rmse_results),
        np.std(rmse_results) / args.n_runs**0.5))
    print("Test log likelihood =         {}/{}".format(
        np.mean(lld_results),
        np.std(lld_results) / args.n_runs**0.5))
    print('NOTE: Test result above output mean and std. errors')

    result_dir = osp.join(RESULT_REG_PATH, '%s_%s' % (args.dataset, method))
    with open(osp.join(result_dir, 'res.json'), 'w') as f:
        res = {
            'rmse':
            [np.mean(rmse_results),
             np.std(rmse_results) / args.n_runs**0.5],
            'lld':
            [np.mean(lld_results),
             np.std(lld_results) / args.n_runs**0.5],
            'test_rmse': [
                np.mean(test_rmse_results),
                np.std(test_rmse_results) / args.n_runs**0.5
            ],
            'test_lld': [
                np.mean(test_lld_results),
                np.std(test_lld_results) / args.n_runs**0.5
            ],
            'all_rmse':
            rmse_results,
            'all_lld':
            lld_results
        }
        json.dump(res, f)
def run(args):
    """Active-learning pretrain-oracle entry point: one `_run` per seed."""
    logger = get_logger(args.dataset,
                        osp.join(LOG_AL_PRETRAIN_ORACLE_PATH, args.dataset),
                        __file__)
    print = logger.info

    # Extra jitter for numerically fragile datasets.
    if args.dataset in ['concrete']:
        gfs.settings.set_jitter(3e-5)
    if args.dataset in ['naval']:
        gfs.settings.set_jitter(1e-4)

    for seed in range(args.init_seed, args.n_runs + args.init_seed):
        _run(args, seed, print)
    def save(self, path, data, sample_rate, codec=None, bitrate=None):
        """ Write waveform data to the file denoted by the given path
        using FFMPEG process.

        :param path: Path of the audio file to save data in.
        :param data: Waveform data to write, shaped (frames, channels).
        :param sample_rate: Sample rate to write file in.
        :param codec: (Optional) Writing codec to use.
        :param bitrate: (Optional) Bitrate of the written audio file.
        :raise IOError: If any error occurs while using FFMPEG to write data.
        """
        _check_ffmpeg_install()
        directory = os.path.dirname(path)
        if not os.path.exists(directory):
            # Raise IOError (not bare Exception) to honour the documented
            # contract; IOError is an Exception subclass, so existing callers
            # catching Exception still work.
            raise IOError(f'output directory does not exists: {directory}')
        get_logger().debug('Writing file %s', path)
        input_kwargs = {'ar': sample_rate, 'ac': data.shape[1]}
        output_kwargs = {'ar': sample_rate, 'strict': '-2'}
        # (Removed a leftover debug print of the first sample's raw bytes.)
        if bitrate:
            output_kwargs['audio_bitrate'] = bitrate
        if codec is not None and codec != 'wav':
            output_kwargs['codec'] = _to_ffmpeg_codec(codec)
        # Feed raw little-endian float32 PCM to FFMPEG through stdin.
        process = (ffmpeg.input(
            'pipe:', format='f32le', **input_kwargs).output(
                path,
                **output_kwargs).overwrite_output().run_async(pipe_stdin=True,
                                                              pipe_stderr=True,
                                                              quiet=True))
        try:
            process.stdin.write(data.astype('<f4').tobytes())
            process.stdin.close()
            process.wait()
        except IOError:
            raise IOError(f'FFMPEG error: {process.stderr.read()}')
        get_logger().info('File %s written successfully', path)
Esempio n. 11
0
 def __init__(self, config):
     """Instantiate the configured style-text generator and cache image
     geometry plus normalisation settings for prediction."""
     predictor_cfg = config['Predictor']
     algorithm = predictor_cfg['algorithm']
     assert algorithm in ["StyleTextRec"
                          ], "Generator {} not supported.".format(algorithm)
     use_gpu = config["Global"]['use_gpu']
     check_gpu(use_gpu)
     self.logger = get_logger()
     # Resolve the generator class by name from the style_text_rec module.
     self.generator = getattr(style_text_rec, algorithm)(config)
     self.height = config["Global"]["image_height"]
     self.width = config["Global"]["image_width"]
     self.scale = predictor_cfg["scale"]
     self.mean = predictor_cfg["mean"]
     self.std = predictor_cfg["std"]
     self.expand_result = predictor_cfg["expand_result"]
Esempio n. 12
0
def main(argv):
    """ Spleeter runner. Parse provided command line arguments
    and run entrypoint for required command (either train,
    evaluate or separate).

    :param argv: Provided command line arguments.
    """
    try:
        parser = create_argument_parser()
        arguments = parser.parse_args(argv[1:])
        enable_logging()
        if arguments.verbose:
            enable_tensorflow_logging()
        # Late imports: only the selected command's dependencies are loaded.
        if arguments.command == 'separate':
            from commands.separate import entrypoint
        elif arguments.command == 'train':
            from commands.train import entrypoint
        elif arguments.command == 'evaluate':
            from commands.evaluate import entrypoint
        # NOTE(review): if `arguments.command` matches none of the branches
        # above, `entrypoint` is unbound and the call below raises NameError —
        # presumably the parser restricts the choices; confirm.
        params = load_configuration(arguments.configuration)
        entrypoint(arguments, params)
    except SpleeterError as e:
        get_logger().error(e)
Esempio n. 13
0
    def __init__(self, config):
        """Corpus generator backed by a text file, one sample per line."""
        self.logger = get_logger()
        self.logger.info("using FileCorpus")

        self.char_list = " 0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"

        generator_cfg = config["CorpusGenerator"]
        corpus_file = generator_cfg["corpus_file"]
        self.language = generator_cfg["language"]
        with open(corpus_file, 'r') as f:
            raw_text = f.read()
        # Drop the trailing empty entry produced by the final newline.
        self.corpus_list = raw_text.split("\n")[:-1]
        assert len(self.corpus_list) > 0
        random.shuffle(self.corpus_list)
        self.index = 0
Esempio n. 14
0
    def __init__(self):
        """Parse CLI flags, load/override the config, and build the text
        drawer plus the configured predictor.

        Side effects: creates the output directory (including parents) and
        a ``predict.log`` file inside it.
        """
        self.FLAGS = ArgsParser().parse_args()
        self.config = load_config(self.FLAGS.config)
        self.config = override_config(self.config, options=self.FLAGS.override)
        self.output_dir = self.config["Global"]["output_dir"]
        # makedirs(exist_ok=True) replaces the check-then-mkdir pair: it also
        # creates missing parent directories and avoids the race between the
        # existence check and the creation.
        os.makedirs(self.output_dir, exist_ok=True)
        self.logger = get_logger(
            log_file='{}/predict.log'.format(self.output_dir))

        self.text_drawer = text_drawers.StdTextDrawer(self.config)

        predictor_method = self.config["Predictor"]["method"]
        assert predictor_method is not None
        self.predictor = getattr(predictors, predictor_method)(self.config)
Esempio n. 15
0
 def safe_load(path, offset, duration, sample_rate, dtype):
     # Nested closure: `self` is captured from the enclosing scope, not a
     # parameter. Arguments are presumably eager tensors (they all expose
     # .numpy()) — confirm against the caller.
     logger = get_logger()
     logger.info(
         f'Loading audio {path} from {offset} to {offset + duration}')
     try:
         (data, _) = self.load(path.numpy(),
                               offset.numpy(),
                               duration.numpy(),
                               sample_rate.numpy(),
                               dtype=dtype.numpy())
         logger.info('Audio data loaded successfully')
         return (data, False)
     except Exception as e:
         logger.exception('An error occurs while loading audio',
                          exc_info=e)
     # Sentinel (-1.0, True) tells the caller that loading failed; the error
     # itself is only logged, never raised.
     return (np.float32(-1.0), True)
def run(args):
    """Active-learning prediction experiment: collect per-seed RMSE and
    log-likelihood results, aggregate across seeds (mean/variance), and
    dump everything to a JSON file."""
    # 'svgp' shares the 'gp' directories, for both method and base method.
    method = 'gp' if args.method == 'svgp' else args.method
    base_method = 'gp' if args.base_method == 'svgp' else args.base_method
    log_dir = osp.join(LOG_AL_PREDICTION_PATH, '%s_%s_%s'%(args.dataset, method, base_method))
    logger = get_logger(args.dataset, log_dir, __file__)
    print = logger.info

    # set jitter (extra numerical stability for these datasets)
    if args.dataset in ['concrete']:
        gfs.settings.set_jitter(3e-5)
    if args.dataset in ['naval']:
        gfs.settings.set_jitter(1e-4)

    rmse_results, lld_results = [], []
    for seed in range(args.init_seed, args.n_runs+args.init_seed):
        rmse, ll = _run(args, seed, print)
        rmse_results.append(rmse)
        lld_results.append(ll)

    # Aggregated over axis 0 (seeds); presumably each row is a per-step
    # curve from _run — confirm _run's output shape.
    rmse_results = np.array(rmse_results)
    lld_results = np.array(lld_results)

    rmse_mean = np.mean(rmse_results, axis=0)
    rmse_var = np.var(rmse_results, axis=0)
    lld_mean = np.mean(lld_results, axis=0)
    lld_var = np.var(lld_results, axis=0)
    print("rmse_mean: %s" % rmse_mean.tolist())
    print("rmse_var: %s" % rmse_var.tolist())

    print("lld_mean: %s" % lld_mean.tolist())
    print("lld_var: %s" % lld_var.tolist())

    result_dir = osp.join(RESULT_AL_PATH, '%s_%s_%s.json'%(args.dataset, method, base_method))
    with open(result_dir, 'w') as f:
        res = {
            'dataset': args.dataset,
            'criteria': args.criteria,
            'rmse_mean': rmse_mean.tolist(),
            'rmse_var': rmse_var.tolist(),
            'lld_mean': lld_mean.tolist(),
            'lld_var': lld_var.tolist(),
            'rmse': rmse_results.tolist(),
            'lld': lld_results.tolist(),
        }
        json.dump(res, f)
Esempio n. 17
0
def main(args):
    """Seed the RNGs, optionally set up wandb, then hand off to `run`."""
    # Setting the random seed throughout the modules
    seed = args.seed
    if seed is not None:
        np.random.seed(seed)
        th.manual_seed(seed)
    args.env_args['seed'] = seed

    config = OmegaConf.to_container(args)
    if args.use_wandb:
        wandb.login()
        wandb.init(entity=args.wandb_entity,
                   project=args.wandb_project,
                   group=args.wandb_group,
                   config=config)

    # run the framework
    run(config, get_logger())
Esempio n. 18
0
import os
import collections
from os.path import dirname, abspath
from copy import deepcopy
from sacred import Experiment, SETTINGS
from sacred.observers import FileStorageObserver
from sacred.utils import apply_backspaces_and_linefeeds
import sys
import torch as th
from utils.logging import get_logger
import yaml

from run import run

SETTINGS['CAPTURE_MODE'] = "fd" # set to "no" if you want to see stdout/stderr in console
logger = get_logger()

# Sacred experiment wiring: shared logger, backspace-cleaned captured output.
ex = Experiment("pymarl")
ex.logger = logger
ex.captured_out_filter = apply_backspaces_and_linefeeds

# <repo root>/results
results_path = os.path.join(dirname(dirname(abspath(__file__))), "results")


@ex.main
def my_main(_run, _config, _log):
    # Setting the random seed throughout the modules
    config = config_copy(_config)
    np.random.seed(config["seed"])
    th.manual_seed(config["seed"])
    config['env_args']['seed'] = config["seed"]
Esempio n. 19
0
from __future__ import absolute_import, unicode_literals

import logging
import os
import sys

from utils.logging import get_logger

CORE_MODULES_PATH = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'core_modules')
MODULES_PATHS = [CORE_MODULES_PATH]

logger = get_logger('module', logging.DEBUG)


class BaseModule(object):
    """
    Base class for creating a module.

    Attributes:
        id          Unique identifier for this module
        title       Title of the app
        description Description of the app
        dependencies    List of other module needed for this one to work.
        handlers    List of irc handlers
    """
    identifier = ''
    title = ''
    description = ''
    # NOTE(review): mutable class-level list defaults are shared by every
    # subclass that does not override them; appending in place would leak
    # between modules — confirm subclasses always reassign.
    dependencies = []
    handlers = []
Esempio n. 20
0
def main_worker(local_rank: int, config: object):
    """Single-GPU worker: build the WaPIRL model, datasets and experiment,
    then run pre-training.

    :param local_rank: GPU index on this node; rank 0 owns logging.
    :param config: Parsed configuration object (attribute access).
    """
    torch.cuda.set_device(local_rank)
    if config.distributed:
        raise NotImplementedError

    # Per-process share of the global batch size and dataloader workers.
    config.batch_size = config.batch_size // config.num_gpus_per_node
    config.num_workers = max(1, config.num_workers // config.num_gpus_per_node)

    # One extra input channel when the input is decoupled (bool -> 0/1).
    in_channels = int(config.decouple_input) + 1

    # Model
    BACKBONE_CONFIGS, Backbone = AVAILABLE_MODELS[config.backbone_type]
    Projector = PROJECTOR_TYPES[config.projector_type]
    encoder = Backbone(BACKBONE_CONFIGS[config.backbone_config],
                       in_channels=in_channels)
    head = Projector(encoder.out_channels, config.projector_size)

    # Optimization
    params = [{'params': encoder.parameters()}, {'params': head.parameters()}]
    optimizer = get_optimizer(params=params,
                              name=config.optimizer,
                              lr=config.learning_rate,
                              weight_decay=config.weight_decay,
                              momentum=config.momentum)
    scheduler = get_scheduler(optimizer=optimizer,
                              name=config.scheduler,
                              epochs=config.epochs,
                              warmup_steps=config.warmup_steps)

    # Data: unlabeled and labeled splits are concatenated for each phase.
    data_kwargs = {
        'transform':
        WM811KTransform(size=config.input_size, mode='test'),
        'positive_transform':
        WM811KTransform(size=config.input_size, mode=config.augmentation),
        'decouple_input':
        config.decouple_input,
    }
    train_set = torch.utils.data.ConcatDataset([
        WM811KForWaPIRL('./data/wm811k/unlabeled/train/', **data_kwargs),
        WM811KForWaPIRL('./data/wm811k/labeled/train/', **data_kwargs),
    ])
    valid_set = torch.utils.data.ConcatDataset([
        WM811KForWaPIRL('./data/wm811k/unlabeled/valid/', **data_kwargs),
        WM811KForWaPIRL('./data/wm811k/labeled/valid/', **data_kwargs),
    ])
    test_set = torch.utils.data.ConcatDataset([
        WM811KForWaPIRL('./data/wm811k/unlabeled/test/', **data_kwargs),
        WM811KForWaPIRL('./data/wm811k/labeled/test/', **data_kwargs),
    ])

    # Experiment (WaPIRL)
    experiment_kwargs = {
        'backbone':
        encoder,
        'projector':
        head,
        'memory':
        MemoryBank(size=(len(train_set), config.projector_size),
                   device=local_rank),
        'optimizer':
        optimizer,
        'scheduler':
        scheduler,
        'loss_function':
        WaPIRLLoss(temperature=config.temperature),
        'loss_weight':
        config.loss_weight,
        'num_negatives':
        config.num_negatives,
        'distributed':
        config.distributed,
        'local_rank':
        local_rank,
        'metrics': {
            'top@1': TopKAccuracy(num_classes=1 + config.num_negatives, k=1),
            'top@5': TopKAccuracy(num_classes=1 + config.num_negatives, k=5)
        },
        'checkpoint_dir':
        config.checkpoint_dir,
        'write_summary':
        config.write_summary,
    }
    experiment = WaPIRL(**experiment_kwargs)

    # Only rank 0 writes the log file; other ranks get no logger.
    if local_rank == 0:
        logfile = os.path.join(config.checkpoint_dir, 'main.log')
        logger = get_logger(stream=False, logfile=logfile)
        logger.info(f"Data: {config.data}")
        logger.info(f"Augmentation: {config.augmentation}")
        logger.info(f"Observations: {len(train_set):,}")
        logger.info(
            f"Trainable parameters ({encoder.__class__.__name__}): {encoder.num_parameters:,}"
        )
        logger.info(
            f"Trainable parameters ({head.__class__.__name__}): {head.num_parameters:,}"
        )
        logger.info(
            f"Projection head: {config.projector_type} ({config.projector_size})"
        )
        logger.info(f"Checkpoint directory: {config.checkpoint_dir}")
    else:
        logger = None

    # Train (WaPIRL)
    run_kwargs = {
        'train_set': train_set,
        'valid_set': valid_set,
        'epochs': config.epochs,
        'batch_size': config.batch_size,
        'num_workers': config.num_workers,
        'logger': logger,
        'save_every': config.save_every,
    }
    experiment.run(**run_kwargs)

    # Release file handlers so repeated workers don't duplicate log output.
    if logger is not None:
        logger.handlers.clear()
# Experiment hyper-parameters; `parser` is an argparse.ArgumentParser
# created earlier in the file.
parser.add_argument('-r', '--rand', type=str, default='uniform')
parser.add_argument('-na', '--n_rand', type=int, default=5)
parser.add_argument('-nh', '--n_hidden', type=int, default=1)
parser.add_argument('-nu', '--n_units', type=int, default=50)
parser.add_argument('-bs', '--batch_size', type=int, default=20)
parser.add_argument('-lr', '--learning_rate', type=float, default=0.001)
parser.add_argument('-e', '--epochs', type=int, default=2000)
parser.add_argument('--n_eigen_threshold', type=float, default=0.99)
parser.add_argument('--train_samples', type=int, default=100)

parser.add_argument('--test_samples', type=int, default=100)
parser.add_argument('--print_interval', type=int, default=100)
parser.add_argument('--test_interval', type=int, default=100)

args = parser.parse_args()
# Route `print` through the experiment logger so all output is captured.
logger = get_logger(args.dataset, 'results/regression/%s/' % args.dataset,
                    __file__)
print = logger.info


def run(seed):
    tf.reset_default_graph()

    ############################## load and normalize data ##############################
    dataset = uci_woval(args.dataset, seed=seed)
    train_x, test_x, train_y, test_y = dataset.x_train, dataset.x_test, dataset.y_train, dataset.y_test
    std_y_train = dataset.std_y_train[0]
    N, input_dim = train_x.shape

    lower_ap = np.minimum(np.min(train_x), np.min(test_x))
    upper_ap = np.maximum(np.max(train_x), np.max(test_x))
    mean_x_train, std_x_train = np.mean(train_x, 0), np.std(train_x, 0)
Esempio n. 22
0
from logging import DEBUG
import socket
from threading import Thread

import schedule
import six
import time

from irc.handlers import handle_connexion, handle_commands
from irc.parser import Response, InvalidResponse
from irc.sender import Sender
from utils.config import Configuration
from utils.logging import get_logger


logger = get_logger('irc', DEBUG)

HOST = 'irc.twitch.tv'
#HOST = 'localhost'
PORT = 6667


class Client(object):
    """ Connect to IRC and dispatch the messages to the handlers. """

    def __init__(self, pipe):
        # Pipe used to exchange messages with the owning process.
        self.pipe = pipe
        self.config = Configuration.get()
        # Socket is opened later (on connect), not at construction time.
        self.socket = None
        self.handlers = []
        # Command handling is always installed by default.
        self.add_handler(handle_commands)
Esempio n. 23
0
from watchdog.events import FileSystemEventHandler
import paho.mqtt.client as mqtt
import json
import requests

from utils import logging
from threading import Thread
from threading import Timer
from probe import probe, run
import time
import os
import tempfile
import shutil
import re

logger = logging.get_logger('main', is_static=True)


class Feeder():
    def __init__(self):
        logger.debug("Initializing Feeder")

        # Comma-separated floats from the OFFICE env var — presumably a
        # geolocation (lat, long); confirm against the deployment config.
        self.office = list(map(float, os.environ["OFFICE"].split(",")))
        self.alg_id = None
        self.recording_volume = os.environ["STORAGE_VOLUME"]
        self.every_nth_frame = int(os.environ["EVERY_NTH_FRAME"])

        # Hosts: DB and MQTT come from the environment; the video-analytics
        # pipeline endpoint is hardcoded to the local sidecar.
        self.dbhost = os.environ["DBHOST"]
        self.vahost = "http://localhost:8080/pipelines"
        self.mqtthost = os.environ["MQTTHOST"]
Esempio n. 24
0
SEED = 1
DATA_PATH = '/project/cq-training-1/project1/data/'
HDF5_8BIT = 'hdf5v7_8bit'
BATCH_LOG_INTERVAL = 50
VALID_PERC = 0.2
# Prefer the SLURM-provided scratch dir; fall back to the user's localscratch.
SLURM_TMPDIR = os.environ["SLURM_TMPDIR"] if "SLURM_TMPDIR" in os.environ else \
               glob.glob('/localscratch/'+os.environ['USER']+'*')[0]

# Setup writers for tensorboard (one run directory per launch timestamp)
current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
train_log_dir = 'logs/gradient_tape/' + current_time + '/train'
test_log_dir = 'logs/gradient_tape/' + current_time + '/valid'
train_summary_writer = tf.summary.create_file_writer(train_log_dir)
test_summary_writer = tf.summary.create_file_writer(test_log_dir)

logger = logging.get_logger()

# Metrics shared by the train/valid loops below
train_mse_metric = tf.keras.metrics.MeanSquaredError()
valid_mse_metric = tf.keras.metrics.MeanSquaredError()
valid_csky_mse_metric = tf.keras.metrics.MeanSquaredError()


def train_epoch(model, data_loader, batch_size, loss_function, optimizer,
                total_examples, scale_label, use_csky):
    train_mse_metric.reset_states()
    for i, batch in tqdm(enumerate(data_loader),
                         total=(np.ceil(total_examples / batch_size)),
                         desc='train epoch',
                         leave=False):
        images, labels, csky = batch['images'], batch['ghi'], batch['csky_ghi']
Esempio n. 25
0
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
A script to benchmark data loading.
"""

import utils.logging as logging
from utils.benchmark import benchmark_data_loading
from utils.misc import launch_job
from utils.parser import load_config, parse_args

logger = logging.get_logger(__name__)


def main():
    """Parse CLI arguments, load the config, and launch the benchmark job."""
    cli_args = parse_args()
    cfg = load_config(cli_args)

    launch_job(cfg=cfg,
               init_method=cli_args.init_method,
               func=benchmark_data_loading)


if __name__ == "__main__":
    main()
Esempio n. 26
0
import torch
import utils.logging as logging
from utils import lr_policy

logger = logging.get_logger('default')

def get_optim_params(model, cfg):
    normal_params = []
    feature_extract_params = []
    deformable_prediction_params = []
    for k, v in model.named_parameters():
        if v.requires_grad:
            if ('feature_extraction' in k) or ('fea_L2' in k) or ('fea_L3' in k):
                feature_extract_params.append(v)
            elif 'conv_offset_mask' in k:
                deformable_prediction_params.append(v)
            else:
                normal_params.append(v)
        # else:
        #     if self.rank <= 0:
        #         logger.warning('Params [{:s}] will not optimize.'.format(k))
    optim_params = [
        {  # add normal params first
            'params': normal_params,
            'lr': cfg.SOLVER.BASE_LR
        },
        {
            'params': feature_extract_params,
            'lr': cfg.SOLVER.BASE_LR
        },
        {
# Experiment hyper-parameters; `parser` is an argparse.ArgumentParser
# created earlier in the file.
parser.add_argument('-il', '--init_logstd', type=float, default=-5.)
parser.add_argument('-na', '--n_rand', type=int, default=100)
parser.add_argument('-nh', '--n_hidden', type=int, default=2)
parser.add_argument('-nu', '--n_units', type=int, default=100)
parser.add_argument('-lr', '--learning_rate', type=float, default=0.003)
parser.add_argument('-e', '--epochs', type=int, default=30000)
parser.add_argument('--n_eigen_threshold', type=float, default=0.9)
parser.add_argument('--train_samples', type=int, default=500)

parser.add_argument('--test_samples', type=int, default=100)
parser.add_argument('--print_interval', type=int, default=100)
parser.add_argument('--test_interval', type=int, default=2000)

parser.add_argument('--seed', type=int, default=8)
args = parser.parse_args()
logger = get_logger(args.dataset, 'results/%s/' % (args.dataset), __file__)
print = logger.info
# Global seeding for reproducibility.
tf.set_random_seed(args.seed)
np.random.seed(args.seed)
# Unit-square domain and piecewise-prior parameters.
xmin, xmax = 0., 1.
ymin, ymax = 0., 1.
lambda_, y_std = 3., 0.02

############################## setup FBNN model ##############################
# Select the piecewise prior class by dataset name and keep its sampler.
prior_generator = dict(p_lin=PiecewiseLinear,
                       p_const=PiecewiseConstant)[args.dataset](lambda_, xmin,
                                                                xmax, ymin,
                                                                ymax).sample


def rand_generator(*arg):
import sys
from functools import wraps
from time import perf_counter

from utils.logging import get_logger

logger = get_logger(__name__)


def timer(f):
    """Decorator that logs the wall-clock duration of each call to *f*."""
    @wraps(f)
    def timed(*args, **kwargs):
        started = perf_counter()
        result = f(*args, **kwargs)
        elapsed = perf_counter() - started
        logger.info('{}, {}'.format(f.__name__, elapsed))
        return result
    return timed


# (unit name, seconds per unit, cast) tuples, largest unit first; used by
# display_time() below to decompose a duration into human-readable parts.
_intervals = (
    ('weeks', 604800, int),  # 60 * 60 * 24 * 7
    ('days', 86400, int),    # 60 * 60 * 24
    ('hours', 3600, int),    # 60 * 60
    ('minutes', 60, int),
    ('seconds', 1.0, float),
    )


def display_time(seconds):
    result = []
Esempio n. 29
0
from __future__ import absolute_import, unicode_literals

from logging import DEBUG
import time

import six

from utils.logging import get_logger

logger = get_logger('irc_sender', DEBUG)

# Max message per window
MAX_MESSAGE = 20
# Moderators can send 100 message per 30 seconds
MAX_MESSAGE_MOD = 100
# In seconds
WINDOW_SIZE = 30


class Sender(object):
    def __init__(self, channel):
        self.channel = channel
        # Outgoing messages waiting for a send slot (rate limiting, see the
        # MAX_MESSAGE / WINDOW_SIZE constants above).
        self.buffer = []
        # Bookkeeping of sent messages — presumably keyed by send window;
        # confirm against the send loop.
        self.sent = {}

    # Handlers which generate the raw messages

    def raw(self, message, **kwargs):
        """Pass the message through unchanged (no IRC command wrapping)."""
        return message

    def who(self, message, **kwargs):
Esempio n. 30
0
import pika

from utils import logging
from utils.config import props

LOGGER = logging.get_logger()

RMQ_PORT = props["RABBIT_MQ"]["RMQ_PORT"]
RMQ_HOST = props["RABBIT_MQ"]["RMQ_HOST"]
RMQ_HEARTBEAT_INTERVAL = props.getint("RABBIT_MQ", "RMQ_HEARTBEAT_INTERVAL")
RMQ_INPUT_QUEUE = props["RABBIT_MQ"]["RMQ_INPUT_QUEUE_NAME"]
RMQ_OUTPUT_QUEUE = props["RABBIT_MQ"]["RMQ_OUTPUT_QUEUE_NAME"]
RMQ_EXCHANGE_QUEUE = props["RABBIT_MQ"]["RMQ_EXCHANGE_NAME"]
RMQ_USER = props.get("RABBIT_MQ", "RMQ_USERNAME")
RMQ_PASSWORD = props.get("RABBIT_MQ", "RMQ_PASSWORD")


def init_rabbitmq_connection():
    """Open a blocking RabbitMQ connection and declare the input queue.

    Returns a ``(connection, channel)`` tuple.
    """
    # NOTE(review): connects to a hardcoded localhost:5672 and ignores the
    # RMQ_HOST / RMQ_PORT / RMQ_USER / RMQ_PASSWORD settings defined above —
    # confirm whether this is intentional (local-only) or an oversight.
    connection = pika.BlockingConnection(
        pika.ConnectionParameters(host="localhost", port=5672))
    channel = connection.channel()
    channel.queue_declare(queue=RMQ_INPUT_QUEUE)
    return connection, channel
Esempio n. 31
0
    logger.info("---- MAYA ----")
    logger.info("Command:               {}".format(" ".join(maya_command)))

    subprocess.check_call(maya_command, cwd=ROOT_DIR)

    # remove temporary files
    if not keep_temp:
        shutil.rmtree(temp_dir)


# ----------------------------------------------------------------------------

if __name__ == '__main__':
    # Parse command-line flags into the global FLAGS object.
    config = flags.FLAGS
    config(sys.argv)

    # Logger writes alongside the conversion output.
    logger = logging.get_logger(config.output_dir)

    # Run the video-to-mocap conversion with the configured interpreters
    # and executables.
    video2mocap(
        config.video_path,
        config.output_dir,
        config.keep_temp,
        config.mayapy_exe,
        config.python2_exe,
        config.python3_exe,
    )
Esempio n. 32
0
def main(args):
    """Train, validate, and test a denoising model on the WM-811K data.

    Args:
        args: Parsed command-line arguments. ``args.backbone_type`` selects
            the ``(Config, Encoder, Decoder)`` entry in ``AVAILABLE_MODELS``;
            the remaining attributes are consumed by ``Config``.

    Raises:
        ValueError: If ``config.data`` is anything other than ``'wm811k'``.
    """

    # 1. Configurations
    torch.backends.cudnn.benchmark = True
    # NOTE(review): ENCODER_CONFIGS / DECODER_CONFIGS are unpacked here but
    # RESNET_ENCODER_CONFIGS / RESNET_DECODER_CONFIGS are used below —
    # confirm which is the intended source of the backbone configuration.
    ENCODER_CONFIGS, DECODER_CONFIGS, Config, Encoder, Decoder = \
        AVAILABLE_MODELS[args.backbone_type]

    config = Config(args)
    config.save()

    # Log to a file only; no console output.
    logfile = os.path.join(config.checkpoint_dir, 'main.log')
    logger = get_logger(stream=False, logfile=logfile)

    # 2. Data: noisy inputs paired with clean targets, over both the
    # labeled and unlabeled splits of WM-811K.
    input_transform = get_transform(config.data,
                                    size=config.input_size,
                                    mode=config.augmentation,
                                    noise=config.noise)
    target_transform = get_transform(config.data,
                                     size=config.input_size,
                                     mode='test')
    if config.data == 'wm811k':
        train_set = torch.utils.data.ConcatDataset([
            WM811KForDenoising('./data/wm811k/unlabeled/train/',
                               input_transform, target_transform),
            WM811KForDenoising('./data/wm811k/labeled/train/', input_transform,
                               target_transform),
        ])
        valid_set = torch.utils.data.ConcatDataset([
            WM811KForDenoising('./data/wm811k/unlabeled/valid/',
                               input_transform, target_transform),
            WM811KForDenoising('./data/wm811k/labeled/valid/', input_transform,
                               target_transform),
        ])
        test_set = torch.utils.data.ConcatDataset([
            WM811KForDenoising('./data/wm811k/unlabeled/test/',
                               input_transform, target_transform),
            WM811KForDenoising('./data/wm811k/labeled/test/', input_transform,
                               target_transform),
        ])
    else:
        raise ValueError(
            f"Denoising only supports 'wm811k' data. Received '{config.data}'."
        )

    # 3. Model: encoder/decoder pair; the decoder mirrors the encoder's
    # output shape back to the input resolution.
    encoder = Encoder(RESNET_ENCODER_CONFIGS[config.backbone_config],
                      in_channels=IN_CHANNELS[config.data])
    decoder = Decoder(RESNET_DECODER_CONFIGS[config.backbone_config],
                      input_shape=encoder.output_shape,
                      output_shape=(OUT_CHANNELS[config.data],
                                    config.input_size, config.input_size))

    # 4. Optimization: a single optimizer over both parameter groups.
    params = [{
        'params': encoder.parameters()
    }, {
        'params': decoder.parameters()
    }]
    optimizer = get_optimizer(params=params,
                              name=config.optimizer,
                              lr=config.learning_rate,
                              weight_decay=config.weight_decay,
                              momentum=config.momentum)
    scheduler = get_scheduler(optimizer=optimizer,
                              name=config.scheduler,
                              epochs=config.epochs,
                              milestone=config.milestone,
                              warmup_steps=config.warmup_steps)

    # 5. Experiment (Denoising)
    experiment_kwargs = {
        'encoder': encoder,
        'decoder': decoder,
        'optimizer': optimizer,
        'scheduler': scheduler,
        # NOTE(review): cross-entropy implies the decoder emits per-pixel
        # class logits rather than reconstructed intensities — confirm
        # against the Denoising experiment's target handling.
        'loss_function': nn.CrossEntropyLoss(reduction='mean'),
        'metrics': None,
        'checkpoint_dir': config.checkpoint_dir,
        'write_summary': config.write_summary,
    }
    experiment = Denoising(**experiment_kwargs)

    # 6. Run (train, evaluate, and test model)
    run_kwargs = {
        'train_set': train_set,
        'valid_set': valid_set,
        'test_set': test_set,
        'epochs': config.epochs,
        'batch_size': config.batch_size,
        'num_workers': config.num_workers,
        'device': config.device,
        'logger': logger,
        'save_every': config.save_every,
    }

    logger.info(f"Data: {config.data}")
    logger.info(f"Augmentation: {config.augmentation}")
    logger.info(
        f"Train : Valid : Test = {len(train_set):,} : {len(valid_set):,} : {len(test_set):,}"
    )
    logger.info(
        f"Trainable parameters ({encoder.__class__.__name__}): {encoder.num_parameters:,}"
    )
    logger.info(
        f"Trainable parameters ({decoder.__class__.__name__}): {decoder.num_parameters:,}"
    )
    logger.info(f"Saving model checkpoints to: {experiment.checkpoint_dir}")
    logger.info(
        f"Epochs: {run_kwargs['epochs']}, Batch size: {run_kwargs['batch_size']}"
    )
    logger.info(
        f"Workers: {run_kwargs['num_workers']}, Device: {run_kwargs['device']}"
    )

    # Ceiling division: the last, possibly partial batch counts as one step.
    # Fixes the previous `len // batch_size + 1`, which over-counted by one
    # whenever the dataset size is an exact multiple of the batch size.
    steps_per_epoch = (len(train_set) + config.batch_size -
                       1) // config.batch_size
    logger.info(f"Training steps per epoch: {steps_per_epoch:,}")
    logger.info(
        f"Total number of training iterations: {steps_per_epoch * config.epochs:,}"
    )

    experiment.run(**run_kwargs)
    # Drop file handlers so repeated invocations don't duplicate log lines.
    logger.handlers.clear()
Esempio n. 33
0
def main(args):
    """Pre-train a backbone with PIRL self-supervision on WM-811K.

    Args:
        args: Parsed command-line arguments. ``args.backbone_type`` selects
            the ``(BACKBONE_CONFIGS, Config, Backbone)`` entry in
            ``AVAILABLE_MODELS``; ``args.projector_type`` selects the
            projection head. The remaining attributes are consumed by
            ``Config``.

    Raises:
        ValueError: If ``config.data`` is anything other than ``'wm811k'``.
    """

    # 1. Configurations
    torch.backends.cudnn.benchmark = True
    BACKBONE_CONFIGS, Config, Backbone = AVAILABLE_MODELS[args.backbone_type]
    Projector = PROJECTOR_TYPES[args.projector_type]

    config = Config(args)
    config.save()

    # Log to a file only; no console output.
    logfile = os.path.join(config.checkpoint_dir, 'main.log')
    logger = get_logger(stream=False, logfile=logfile)

    # 2. Data: each sample yields an anchor view ('transform') and an
    # augmented positive view ('positive_transform'), over both the
    # labeled and unlabeled splits of WM-811K.
    if config.data == 'wm811k':
        data_transforms = {
            'transform': get_transform(data=config.data,
                                       size=config.input_size,
                                       mode='test'),
            'positive_transform': get_transform(data=config.data,
                                                size=config.input_size,
                                                mode=config.augmentation),
        }
        train_set = torch.utils.data.ConcatDataset([
            WM811KForPIRL('./data/wm811k/unlabeled/train/', **data_transforms),
            WM811KForPIRL('./data/wm811k/labeled/train/', **data_transforms),
        ])
        valid_set = torch.utils.data.ConcatDataset([
            WM811KForPIRL('./data/wm811k/unlabeled/valid/', **data_transforms),
            WM811KForPIRL('./data/wm811k/labeled/valid/', **data_transforms),
        ])
        test_set = torch.utils.data.ConcatDataset([
            WM811KForPIRL('./data/wm811k/unlabeled/test/', **data_transforms),
            WM811KForPIRL('./data/wm811k/labeled/test/', **data_transforms),
        ])
    else:
        raise ValueError(
            f"PIRL only supports 'wm811k' data. Received '{config.data}'.")

    # 3. Model: feature backbone plus projection head.
    backbone = Backbone(BACKBONE_CONFIGS[config.backbone_config],
                        in_channels=IN_CHANNELS[config.data])
    projector = Projector(backbone.out_channels, config.projector_size)

    # 4. Optimization: a single optimizer over both parameter groups.
    params = [{
        'params': backbone.parameters()
    }, {
        'params': projector.parameters()
    }]
    optimizer = get_optimizer(params=params,
                              name=config.optimizer,
                              lr=config.learning_rate,
                              weight_decay=config.weight_decay,
                              momentum=config.momentum)
    scheduler = get_scheduler(optimizer=optimizer,
                              name=config.scheduler,
                              epochs=config.epochs,
                              milestone=config.milestone,
                              warmup_steps=config.warmup_steps)

    # 5. Experiment (PIRL): memory bank holds one projected vector per
    # training sample; top-k accuracy is measured over 1 positive +
    # `num_negatives` negatives.
    experiment_kwargs = {
        'backbone': backbone,
        'projector': projector,
        'memory': MemoryBank(size=(len(train_set), config.projector_size),
                             device=config.device),
        'optimizer': optimizer,
        'scheduler': scheduler,
        'loss_function': PIRLLoss(temperature=config.temperature),
        'loss_weight': config.loss_weight,
        'num_negatives': config.num_negatives,
        'metrics': {
            'top@1': TopKAccuracy(num_classes=1 + config.num_negatives, k=1),
            'top@5': TopKAccuracy(num_classes=1 + config.num_negatives, k=5),
        },
        'checkpoint_dir': config.checkpoint_dir,
        'write_summary': config.write_summary,
    }
    experiment = PIRL(**experiment_kwargs)

    # 6. Run (train, evaluate, and test model)
    run_kwargs = {
        'train_set': train_set,
        'valid_set': valid_set,
        'test_set': test_set,
        'epochs': config.epochs,
        'batch_size': config.batch_size,
        'num_workers': config.num_workers,
        'device': config.device,
        'logger': logger,
        'save_every': config.save_every,
    }

    logger.info(f"Data: {config.data}")
    logger.info(f"Augmentation: {config.augmentation}")
    logger.info(
        f"Train : Valid : Test = {len(train_set):,} : {len(valid_set):,} : {len(test_set):,}"
    )
    logger.info(
        f"Trainable parameters ({backbone.__class__.__name__}): {backbone.num_parameters:,}"
    )
    logger.info(
        f"Trainable parameters ({projector.__class__.__name__}): {projector.num_parameters:,}"
    )
    logger.info(f"Projector type: {config.projector_type}")
    logger.info(f"Projector dimension: {config.projector_size}")
    logger.info(f"Saving model checkpoints to: {experiment.checkpoint_dir}")
    logger.info(
        f"Epochs: {run_kwargs['epochs']}, Batch size: {run_kwargs['batch_size']}"
    )
    logger.info(
        f"Workers: {run_kwargs['num_workers']}, Device: {run_kwargs['device']}"
    )

    # Ceiling division: the last, possibly partial batch counts as one step.
    # Fixes the previous `len // batch_size + 1`, which over-counted by one
    # whenever the dataset size is an exact multiple of the batch size.
    steps_per_epoch = (len(train_set) + config.batch_size -
                       1) // config.batch_size
    logger.info(f"Training steps per epoch: {steps_per_epoch:,}")
    logger.info(
        f"Total number of training iterations: {steps_per_epoch * config.epochs:,}"
    )

    if config.resume_from_checkpoint is not None:
        logger.info(
            f"Resuming from a checkpoint: {config.resume_from_checkpoint}")
        model_ckpt = os.path.join(config.resume_from_checkpoint,
                                  'best_model.pt')
        memory_ckpt = os.path.join(config.resume_from_checkpoint,
                                   'best_memory.pt')
        experiment.load_model_from_checkpoint(
            model_ckpt)  # load model & optimizer
        experiment.memory.load(memory_ckpt)  # load memory bank

        # Move optimizer state tensors to the target device, since the
        # checkpoint may have been saved from a different device.
        for state in experiment.optimizer.state.values():
            for k, v in state.items():
                if isinstance(v, torch.Tensor):
                    state[k] = v.to(config.device)

    experiment.run(**run_kwargs)
    # Drop file handlers so repeated invocations don't duplicate log lines.
    logger.handlers.clear()