Example #1
0
 def __init__(self, name, dump_path, create_logger=True):
     """
     Initialize the experiment.
     """
     self.name = name
     self.start_time = datetime.now()
     self.dump_path = os.path.join(dump_path, name)
     self.components = {}
     if not os.path.exists(self.dump_path):
         os.makedirs(self.dump_path)
     self.logs_path = os.path.join(self.dump_path, "experiment.log")
     if create_logger:
         self.log_formatter = utils.create_logger(self.logs_path)
Example #2
0
 def parse_config(self):
     """
     Reads the config data and sets up values
     """
     if not self.config:
         return False
     self.log_dir = self.config.get('log_dir', None)
     if not self.log_dir:
         print "!!! Log dir must be specified in yaml"
         sys.exit(127)
     if not os.path.isdir(self.log_dir):
         os.makedirs(self.log_dir)
     log_file_name = os.path.join(self.log_dir, "artifactor_log.txt")
     self.logger = create_logger('artifactor_logger', log_file_name)
     self.squash_exceptions = self.config.get('squash_exceptions', False)
     self.setup_plugin_instances()
     self.start_server()
     self.global_data = {'artifactor_config': self.config, 'log_dir': self.config['log_dir'],
                         'artifacts': dict()}
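
For reference, a minimal config dict that would satisfy parse_config above; the two keys come straight from the code, the values are invented:

config = {
    'log_dir': '/tmp/artifactor_logs',   # required; parse_config exits without it
    'squash_exceptions': False,          # optional, defaults to False
}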
Example #3
0
import logging
import re
import traceback
import urllib
from bson import ObjectId
from datetime import datetime, timedelta
import requests
from multiprocessing import Pool, cpu_count

import neuralcoref
import spacy
from spacy.pipeline import EntityRuler
from config import config
import utils

app_logger = utils.create_logger('entity_gender_annotator_logger', log_dir='logs', logger_level=logging.INFO, file_log_level=logging.INFO)

# ========== Named Entity Merging functions ==========

# merge_nes is a two-step unification process:
# 1- Merge NEs based on exact match
# 2- Merge NEs based on partial match
def merge_nes(doc_coref):
    # ne_dict and ne_clust are dictionaries whose keys are PERSON named entities extracted from the text and whose
    # values are mentions of that named entity in the text. Mention clusters come from the coreference clustering algorithm.
    ne_dict = {}
    ne_clust = {}
    # It's highly recommended to clean NEs before merging them, as they usually contain invalid characters
    person_nes = [x for x in doc_coref.ents if x.label_ == 'PERSON']
    # in this for loop we try to merge clusters detected in coreference clustering
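
The body of merge_nes is cut off above; below is a minimal sketch of the two-step unification the comments describe, written over plain name strings rather than spaCy spans (merge_person_names and its folding rule are illustrative, not the original implementation):

def merge_person_names(names):
    """Hedged sketch: exact-match grouping first, then partial-match folding."""
    # step 1: exact match -- group identical surface forms
    groups = {}
    for name in names:
        groups.setdefault(name.strip(), []).append(name)
    # step 2: partial match -- fold a name into the longest remaining name
    # that contains it as a whole token (e.g. "Smith" into "John Smith")
    for short in sorted(list(groups), key=len):
        candidates = [g for g in groups if g != short and short in g.split()]
        if candidates:
            groups[max(candidates, key=len)].extend(groups.pop(short))
    return groups

# merge_person_names(["John Smith", "Smith", "John Smith", "Mary Jones"])
# -> {'John Smith': ['John Smith', 'John Smith', 'Smith'], 'Mary Jones': ['Mary Jones']}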
Example #4
0
    def __init__(self, args):

        # get rank
        self.world_size = dist.get_world_size()
        self.rank = dist.get_rank()

        if self.rank == 0:
            # mkdir path
            if not os.path.exists('{}/events'.format(args.exp_path)):
                os.makedirs('{}/events'.format(args.exp_path))
            if not os.path.exists('{}/images'.format(args.exp_path)):
                os.makedirs('{}/images'.format(args.exp_path))
            if not os.path.exists('{}/logs'.format(args.exp_path)):
                os.makedirs('{}/logs'.format(args.exp_path))
            if not os.path.exists('{}/checkpoints'.format(args.exp_path)):
                os.makedirs('{}/checkpoints'.format(args.exp_path))

            # logger
            if args.trainer['tensorboard'] and not (args.extract
                                                    or args.evaluate):
                try:
                    from tensorboardX import SummaryWriter
                except ImportError:
                    raise Exception("Please switch off \"tensorboard\" "
                                    "in your config file if you do not "
                                    "want to use it, otherwise install it.")
                self.tb_logger = SummaryWriter('{}/events'.format(
                    args.exp_path))
            else:
                self.tb_logger = None
            if args.validate:
                self.logger = utils.create_logger(
                    'global_logger',
                    '{}/logs/log_offline_val.txt'.format(args.exp_path))
            elif args.extract:
                self.logger = utils.create_logger(
                    'global_logger',
                    '{}/logs/log_extract.txt'.format(args.exp_path))
            elif args.evaluate:
                self.logger = utils.create_logger(
                    'global_logger',
                    '{}/logs/log_evaluate.txt'.format(args.exp_path))
            else:
                self.logger = utils.create_logger(
                    'global_logger',
                    '{}/logs/log_train.txt'.format(args.exp_path))

        # create model
        self.model = models.__dict__[args.model['algo']](
            args.model, load_path=args.load_path, dist_model=True)

        # optionally resume from a checkpoint
        assert not (args.load_iter is not None and args.load_path is not None)
        if args.load_iter is not None:
            self.model.load_state("{}/checkpoints".format(args.exp_path),
                                  args.load_iter, args.resume)
            self.start_iter = args.load_iter
        else:
            self.start_iter = 0

        self.curr_step = self.start_iter

        # lr scheduler & datasets
        trainval_class = datasets.__dict__[args.data['trainval_dataset']]
        eval_class = (None if args.data['eval_dataset'] is None else
                      datasets.__dict__[args.data['eval_dataset']])
        extract_class = (None if args.data['extract_dataset'] is None else
                         datasets.__dict__[args.data['extract_dataset']])

        if not (args.validate or args.extract or args.evaluate):  # train
            self.lr_scheduler = utils.StepLRScheduler(
                self.model.optim,
                args.model['lr_steps'],
                args.model['lr_mults'],
                args.model['lr'],
                args.model['warmup_lr'],
                args.model['warmup_steps'],
                last_iter=self.start_iter - 1)

            train_dataset = trainval_class(args.data, 'train')
            train_sampler = utils.DistributedGivenIterationSampler(
                train_dataset,
                args.model['total_iter'],
                args.data['batch_size'],
                last_iter=self.start_iter - 1)
            self.train_loader = DataLoader(train_dataset,
                                           batch_size=args.data['batch_size'],
                                           shuffle=False,
                                           num_workers=args.data['workers'],
                                           pin_memory=False,
                                           sampler=train_sampler)

        if not (args.extract or args.evaluate):  # train or offline validation
            val_dataset = trainval_class(args.data, 'val')
            val_sampler = utils.DistributedSequentialSampler(val_dataset)
            self.val_loader = DataLoader(
                val_dataset,
                batch_size=args.data['batch_size_val'],
                shuffle=False,
                num_workers=args.data['workers'],
                pin_memory=False,
                sampler=val_sampler)

        if not (args.validate or args.extract
                ) and eval_class is not None:  # train or offline evaluation
            eval_dataset = eval_class(args.data, 'eval')
            assert len(eval_dataset) % (self.world_size * args.data['batch_size_eval']) == 0, \
                "Otherwise the padded samples will be involved twice."
            eval_sampler = utils.DistributedSequentialSampler(eval_dataset)
            self.eval_loader = DataLoader(
                eval_dataset,
                batch_size=args.data['batch_size_eval'],
                shuffle=False,
                num_workers=1,
                pin_memory=False,
                sampler=eval_sampler)

        if args.extract:  # extract
            assert extract_class is not None, 'Please specify extract_dataset'
            extract_dataset = extract_class(args.data, 'extract')
            extract_sampler = utils.DistributedSequentialSampler(
                extract_dataset)
            self.extract_loader = DataLoader(
                extract_dataset,
                batch_size=args.data['batch_size_extract'],
                shuffle=False,
                num_workers=1,
                pin_memory=False,
                sampler=extract_sampler)

        self.args = args
Example #5
0
def main(args):

    save_folder = '%s_%s' % (args.dataset, args.affix)

    log_folder = os.path.join(args.log_root, save_folder)
    model_folder = os.path.join(args.model_root, save_folder)

    makedirs(log_folder)
    makedirs(model_folder)

    setattr(args, 'log_folder', log_folder)
    setattr(args, 'model_folder', model_folder)

    logger = create_logger(log_folder, args.todo, 'info')

    print_args(args, logger)

    # Using a WideResNet model
    model = WideResNet(depth=34, num_classes=10, widen_factor=1, dropRate=0.0)
    flop, param = get_model_infos(model, (1, 3, 32, 32))
    logger.info('Model Info: FLOP = {:.2f} M, Params = {:.2f} MB'.format(
        flop, param))

    # Configuring the train attack mode
    if args.adv_train_mode == 'FGSM':
        train_attack = FastGradientSignUntargeted(model,
                                                  args.epsilon,
                                                  args.alpha,
                                                  min_val=0,
                                                  max_val=1,
                                                  max_iters=args.k,
                                                  _type=args.perturbation_type,
                                                  logger=logger)
    elif args.adv_train_mode == 'CW':
        mean = [0]
        std = [1]
        inputs_box = (min((0 - m) / s for m, s in zip(mean, std)),
                      max((1 - m) / s for m, s in zip(mean, std)))
        train_attack = carlini_wagner_L2.L2Adversary(targeted=False,
                                                     confidence=0.0,
                                                     search_steps=10,
                                                     optimizer_lr=5e-4,
                                                     logger=logger)

    # Configuring the test attack mode
    if args.adv_test_mode == 'FGSM':
        test_attack = FastGradientSignUntargeted(model,
                                                 args.epsilon,
                                                 args.alpha,
                                                 min_val=0,
                                                 max_val=1,
                                                 max_iters=args.k,
                                                 _type=args.perturbation_type,
                                                 logger=logger)
    elif args.adv_test_mode == 'CW':
        mean = [0]
        std = [1]
        inputs_box = (min((0 - m) / s for m, s in zip(mean, std)),
                      max((1 - m) / s for m, s in zip(mean, std)))
        test_attack = carlini_wagner_L2.L2Adversary(targeted=False,
                                                    confidence=0.0,
                                                    search_steps=10,
                                                    optimizer_lr=5e-4,
                                                    logger=logger)

    if torch.cuda.is_available():
        model.cuda()

    trainer = Trainer(args, logger, train_attack, test_attack)

    if args.todo == 'train':
        transform_train = tv.transforms.Compose([
            tv.transforms.ToTensor(),
            tv.transforms.Lambda(lambda x: F.pad(
                x.unsqueeze(0),
                (4, 4, 4, 4), mode='constant', value=0).squeeze()),
            tv.transforms.ToPILImage(),
            tv.transforms.RandomCrop(32),
            tv.transforms.RandomHorizontalFlip(),
            tv.transforms.ToTensor(),
        ])
        tr_dataset = tv.datasets.CIFAR10(args.data_root,
                                         train=True,
                                         transform=transform_train,
                                         download=True)

        tr_loader = DataLoader(tr_dataset,
                               batch_size=args.batch_size,
                               shuffle=True,
                               num_workers=4)

        # evaluation during training
        te_dataset = tv.datasets.CIFAR10(args.data_root,
                                         train=False,
                                         transform=tv.transforms.ToTensor(),
                                         download=True)

        te_loader = DataLoader(te_dataset,
                               batch_size=args.batch_size,
                               shuffle=False,
                               num_workers=4)

        trainer.train(model, tr_loader, te_loader, args.adv_train)
    elif args.todo == 'test':
        pass
    else:
        raise NotImplementedError
Example #6
0
import sys
import time
import curses
from collections import namedtuple
from itertools import cycle


from render import Renderer, Renderable
from weapon import Blaster, Laser, UM
from utils import Point, Event, Surface, Color, Layout, InfList

from utils import create_logger


log = create_logger("main_log", "main_app.log")


KEY = "KEY"
K_Q = ord("q")
K_E = ord("e")
K_A = ord("a")
K_D = ord("d")
K_SPACE = ord(" ")
K_ESCAPE = 27

MILLISECONDS_PER_FRAME = 16


class Spaceship(Renderable):
    def __init__(self, pos, border, owner):
Example #7
0
    # specify local top-level log dir
    if args.log_dir is None:
        args.log_dir = ''
    args.log_dir = join(here, 'logs', args.log_dir)

    # specify scenarios log dir
    args.scenario_log_dir = 'scenario_logs'
    args.scenario_log_dir = join(args.log_dir, args.scenario_log_dir)

    # make the log dirs - log names should be unique
    if os.path.exists(args.log_dir):
        shutil.rmtree(args.log_dir)
    os.makedirs(args.scenario_log_dir)
    
    # create top-level log file
    logfile = join(args.log_dir, 'log.txt')
    log = create_logger(args.app_name, logfile, '%(asctime)s - %(message)s')
        
    # pre-processing: convert string-valued CLI args into Python objects
    for arg in ['network_id', 'scenario_ids', 'template_id']:
        value = getattr(args, arg)
        if value is not None:
            setattr(args, arg, eval(value))
    
    argdict = args.__dict__.copy()
    argdict['hydra_password'] = '******'  # we need to encrypt this password
    argtuples = sorted(argdict.items())
    args_str = '\n\t'.join([''] + ['{}: {}'.format(a[0], a[1]) for a in argtuples])
    log.info('started model run with args: %s' % args_str)
    
    run_scenarios(args, log)
Example #8
0
            save_checkpoint(data_to_save, best_model_path, model_dir)

    logger.info(f'best score: {best_score:.04f}')
    return -best_score


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', help='model to resume training', type=str)
    parser.add_argument('--fold', help='fold number', type=int, default=0)
    parser.add_argument('--predict',
                        help='model to resume training',
                        action='store_true')
    parser.add_argument('--num_tta',
                        help='number of TTAs',
                        type=int,
                        default=opt.TEST.NUM_TTAS)
    args = parser.parse_args()

    params = {'dropout': 0}

    opt.EXPERIMENT_DIR = os.path.join(opt.EXPERIMENT_DIR, f'fold_{args.fold}')
    opt.TEST.NUM_TTAS = args.num_tta

    if not os.path.exists(opt.EXPERIMENT_DIR):
        os.makedirs(opt.EXPERIMENT_DIR)

    logger = create_logger(os.path.join(opt.EXPERIMENT_DIR,
                                        'log_training.txt'))
    train_model(params)
Example #9
0
def train_model(params: Dict[str, str]) -> float:
    np.random.seed(0)

    hash = hashlib.sha224(str(params).encode()).hexdigest()[:8]
    model_dir = os.path.join(opt.EXPERIMENT_DIR, f'{hash}')

    if not os.path.exists(model_dir):
        os.makedirs(model_dir)

    str_params = str(params)
    hyperopt_logger.info('=' * 50)
    hyperopt_logger.info(f'hyperparameters: {str_params}')

    global logger
    log_file = os.path.join(model_dir, f'log_training.txt')
    logger.handlers = []
    logger = create_logger(log_file)

    logger.info('=' * 50)
    logger.info(f'model_dir: {model_dir}')
    logger.info(f'hyperparameters: {str_params}')

    train_loader, val_loader, test_loader = load_data(args.fold, params)
    model = create_model(args.predict, float(params['dropout']))
    # freeze_layers(model)

    # if torch.cuda.device_count() == 1:
    #     torchsummary.summary(model, (3, 224, 224))

    if opt.TRAIN.OPTIMIZER == 'Adam':
        optimizer = optim.Adam(model.parameters(), opt.TRAIN.LEARNING_RATE)
    elif opt.TRAIN.OPTIMIZER == 'SGD':
        optimizer = optim.SGD(model.parameters(), opt.TRAIN.LEARNING_RATE,
                              momentum=0.9, nesterov=True)
    else:
        assert False

    if opt.TRAIN.COSINE.ENABLE:
        set_lr(optimizer, opt.TRAIN.COSINE.LR)
        lr_scheduler = CosineLRWithRestarts(optimizer, opt.TRAIN.BATCH_SIZE,
            opt.TRAIN.BATCH_SIZE * opt.TRAIN.STEPS_PER_EPOCH,
            restart_period=opt.TRAIN.COSINE.PERIOD, t_mult=opt.TRAIN.COSINE.COEFF)
    else:
        lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max',
                           patience=opt.TRAIN.PATIENCE, factor=opt.TRAIN.LR_REDUCE_FACTOR,
                           verbose=True, min_lr=opt.TRAIN.MIN_LR,
                           threshold=opt.TRAIN.MIN_IMPROVEMENT, threshold_mode='abs')

    if args.weights is None:
        last_epoch = 0
        logger.info(f'training will start from epoch {last_epoch+1}')
    else:
        last_checkpoint = torch.load(args.weights)
        assert(last_checkpoint['arch']==opt.MODEL.ARCH)
        model.load_state_dict(last_checkpoint['state_dict'])
        optimizer.load_state_dict(last_checkpoint['optimizer'])
        logger.info(f'checkpoint {args.weights} was loaded.')

        last_epoch = last_checkpoint['epoch']
        logger.info(f'loaded the model from epoch {last_epoch}')
        set_lr(optimizer, opt.TRAIN.LEARNING_RATE)


    if args.predict:
        print('inference mode')
        generate_submission(val_loader, test_loader, model, last_epoch, args.weights)
        sys.exit(0)

    if opt.TRAIN.LOSS == 'BCE':
        criterion = nn.BCEWithLogitsLoss()
    else:
        raise RuntimeError('unknown loss specified')

    best_score = 0.0
    best_epoch = 0

    last_lr = read_lr(optimizer)
    best_model_path = None

    for epoch in range(last_epoch + 1, opt.TRAIN.EPOCHS + 1):
        logger.info('-' * 50)

        if not opt.TRAIN.COSINE.ENABLE:
            lr = read_lr(optimizer)
            if lr < last_lr - 1e-10 and best_model_path is not None:
                # reload the best model
                last_checkpoint = torch.load(os.path.join(model_dir, best_model_path))
                assert(last_checkpoint['arch']==opt.MODEL.ARCH)
                model.load_state_dict(last_checkpoint['state_dict'])
                optimizer.load_state_dict(last_checkpoint['optimizer'])
                logger.info(f'checkpoint {best_model_path} was loaded.')
                set_lr(optimizer, lr)
                last_lr = lr

            if lr < opt.TRAIN.MIN_LR * 1.01:
                logger.info(f'lr={lr}, start cosine annealing!')
                set_lr(optimizer, opt.TRAIN.COSINE.LR)
                opt.TRAIN.COSINE.ENABLE = True

                lr_scheduler = CosineLRWithRestarts(optimizer, opt.TRAIN.BATCH_SIZE,
                    opt.TRAIN.BATCH_SIZE * opt.TRAIN.STEPS_PER_EPOCH,
                    restart_period=opt.TRAIN.COSINE.PERIOD, t_mult=opt.TRAIN.COSINE.COEFF)

        if opt.TRAIN.COSINE.ENABLE:
            lr_scheduler.step()

        read_lr(optimizer)

        train(train_loader, model, criterion, optimizer, epoch, lr_scheduler)
        score, _ = validate(val_loader, model, epoch)

        if not opt.TRAIN.COSINE.ENABLE:
            lr_scheduler.step(score)    # type: ignore

        is_best = score > best_score
        best_score = max(score, best_score)
        if is_best:
            best_epoch = epoch

        data_to_save = {
            'epoch': epoch,
            'arch': opt.MODEL.ARCH,
            'state_dict': model.state_dict(),
            'best_score': best_score,
            'score': score,
            'optimizer': optimizer.state_dict(),
            'options': opt
        }

        filename = opt.MODEL.VERSION
        if is_best:
            best_model_path = f'{filename}_f{args.fold}_e{epoch:02d}_{score:.04f}.pth'
            save_checkpoint(data_to_save, best_model_path, model_dir)

    logger.info(f'best score: {best_score:.04f}')
    hyperopt_logger.info(f'best score: {best_score:.04f}')
    return -best_score
Example #10
0
from abc import ABCMeta, abstractmethod

from utils import create_logger


log = create_logger(__name__, "render.log")


class Renderable(object, metaclass=ABCMeta):
    @abstractmethod
    def get_render_data(self):
        """Renderable.get_render_data(None) -> (gpos_list, data_gen)

        Every renderable object must return a tuple consisting of:
        * gpos_list: list of every Surface's global positions (List of Points)
          Example: [Point(x=5, y=5), Point(x=10, y=10)]

        * data_gen: generator which yields tuple (lpos, image, style)
          Example: (Point(x=5, y=5), "*", curses.A_BOLD)
        """
        pass


class Renderer(object):
    def __init__(self):
        self._objects = []


    def add_object(self, obj):
        self._objects.append(obj)
        log.debug("add object {} \nObjects: {}".format(obj, self._objects))
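
As an illustration of the get_render_data contract documented above, a minimal Renderable could look like the sketch below; Point and curses are assumed to come from the game's other modules, and the one-cell Star object is invented for the example:

import curses

from utils import Point


class Star(Renderable):
    """Hedged sketch: a single-cell object following the get_render_data contract."""
    def __init__(self, pos):
        self._pos = pos  # global position of the object's only Surface

    def get_render_data(self):
        gpos_list = [self._pos]
        # one (lpos, image, style) tuple per cell of the Surface
        data_gen = iter([(Point(x=0, y=0), "*", curses.A_BOLD)])
        return gpos_list, data_gen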
Example #11
0
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Libravatar.  If not, see <http://www.gnu.org/licenses/>.

import json
from os import link, path, umask
import sys

# pylint: disable=W0403
import settings
from resizeavatar import resize_image
from utils import create_logger, delete_if_exists, is_hex

umask(022)
logger = create_logger('changephoto')


def link_image(source_filename, destination_hash, size=None):
    if size:
        destination_filename = settings.AVATAR_ROOT + '%s/%s' % (size, destination_hash)
    else:
        destination_filename = settings.AVATAR_ROOT + destination_hash

    try:
        link(source_filename, destination_filename)
    except OSError:
        logger.error("Unable to link '%s' to %s" % (source_filename, destination_filename))


def create_links(source_filename, md5_hash, sha256_hash):
Example #12
0
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Libravatar.  If not, see <http://www.gnu.org/licenses/>.

import json
from os import link, path, umask
import sys

# pylint: disable=relative-import
import settings
from resizeavatar import resize_image
from utils import create_logger, delete_if_exists, is_hex

umask(022)
LOGGER = create_logger('changephoto')


def link_image(source_filename, destination_hash, size=None):
    if size:
        destination_filename = settings.AVATAR_ROOT + '%s/%s' % (size, destination_hash)
    else:
        destination_filename = settings.AVATAR_ROOT + destination_hash

    try:
        link(source_filename, destination_filename)
    except OSError:
        LOGGER.error("Unable to link '%s' to %s", source_filename, destination_filename)


def create_links(source_filename, md5_hash, sha256_hash):
Example #13
0
def decrypt_link(url, provider = None):
	# Detect provider from url
	if provider is None:
		for prov_s in _prov_s:
			search, host = prov_s
			if url.find(search) != -1:
				provider = host
	else:
		provider = provider.lower()

	# Decrypt if host is valid
	if provider in _prov_m:
		log.debug('Decrypting link "%s" for provider %s' % (url, provider))
		return _prov_m[provider](url)
	else:
		log.warning('No provider found for "%s"' % provider)
		return ''


if __name__ == '__main__':
	from utils import get_content, create_logger, text_finder, unquote
	log = create_logger()

	print(decrypt_link('http://vk.com/video_ext.php?oid=257082693&id=169050811&hash=6a52166caf33e3bf'))
	print(decrypt_link('http://embed.nowvideo.sx/embed.php?v=e98d57277349b'))
	print(decrypt_link('http://embed.videoweed.es/embed.php?v=66f8b27946d4b&width=760&height=430'))
	print(decrypt_link('http://embed.novamov.com/embed.php?width=760&height=430&v=e21bf91d87ab9&px=1'))
	print(decrypt_link('http://videobam.com/widget/MuJOF/custom/773'))
	print(decrypt_link('http://www.dailymotion.com/embed/video/x2ii256'))
	print(decrypt_link('https://www.amazon.com/clouddrive/share/E9ZN27pbKrU5DJh5cS-oE35a8F13HykhUpqer4qdZw0'))
	print(decrypt_link('https://www.amazon.com/clouddrive/share/akjsdhkajsd'))
Example #14
0
#
# You should have received a copy of the GNU Affero General Public License
# along with Libravatar.  If not, see <http://www.gnu.org/licenses/>.

from gearman import libgearman
import Image
import json
import os
import subprocess
import sys

# pylint: disable=W0403
import settings
from utils import create_logger, delete_if_exists, is_hex, is_hash_pair

logger = create_logger('cropresize')

MAX_PIXELS = 5000


def create_broken_image(broken, dest):
    delete_if_exists(dest)

    os.symlink(broken, dest)


def pil_format_to_ext(pil_format):
    if 'PNG' == pil_format:
        return '.png'
    elif 'JPEG' == pil_format:
        return '.jpg'
Example #15
0
import configparser

from abc import ABCMeta, abstractmethod

from utils import Point, Surface, create_logger


log = create_logger(__name__, "weapon.log")
config_file = "weapons.cfg"
config = configparser.ConfigParser(allow_no_value=True,
        interpolation=configparser.ExtendedInterpolation())
config.read(config_file)


class IWeapon(object, metaclass=ABCMeta):
    @abstractmethod
    def make_shot(self):
        """Make a shot; raise ValueError if it cannot."""
        pass


    @abstractmethod
    def update(self):
        """update coords list"""
        pass


def _load_from_config(weapon, config):
    section = weapon.__name__
    params = ("ammo", "max_ammo", "cooldown", "damage", "radius", "dy")
    return {var : config.get(section, var) for var in params}
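
For reference, _load_from_config only needs a config section named after the weapon class holding the six listed options; a hypothetical fragment and a call against it (the values and the DemoBlaster stand-in are invented):

sample_cfg = """
[DemoBlaster]
ammo = 30
max_ammo = 30
cooldown = 5
damage = 1
radius = 0
dy = -1
"""


class DemoBlaster:
    pass


demo_config = configparser.ConfigParser(allow_no_value=True,
        interpolation=configparser.ExtendedInterpolation())
demo_config.read_string(sample_cfg)
print(_load_from_config(DemoBlaster, demo_config))
# config.get() returns strings: {'ammo': '30', 'max_ammo': '30', 'cooldown': '5', ...}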
Example #16
0
Server: Microsoft-IIS/6.0(but not really)\r
p3p: CP='NOI ADMa OUR STP'\r
X-Powered-By: ASP.NET(actually altwfc's twisted python)\r
cluster-server: gstprdweb13.las1.colo.ignops.com\r
Content-Length: _CL\r
Content-Type: text/html\r
Set-Cookie: ASPSESSIONIDSQQQDSCC=GIPIIGECEJLOAHGENKMFFPOC; path=/\r
Cache-control: private\r\n\r\n"""

#Logger settings
logger_output_to_console = True
logger_output_to_file = True
logger_name = "TATVSCAPWIIgamestats2"
logger_filename = "tatvscapwii_gamestats2.log"
dbfilename = 'tatvscapwii_leaderboard.db'
logger = utils.create_logger(logger_name, logger_filename, -1, logger_output_to_console, logger_output_to_file)

conn = sqlite3.connect(dbfilename)
conn.cursor().execute('CREATE TABLE IF NOT EXISTS leaderboard (ingamesn TEXT, profileid INT UNIQUE, battlepoints INT, chartdata TEXT, utctimestamp INT)')
conn.commit()
conn.close()

def emptyreply():
  replypayload = '\x00'*200
  replypayload = lbheader.replace("A",struct.pack('B',0))+replypayload
  replypayload += hashlib.sha1(salt+base64.urlsafe_b64encode(replypayload)+salt).hexdigest()
  return httpheaders.replace("_CL",str(len(replypayload)))+replypayload

def leaderboard_best_ingame():
    replypayload = '' 
    conn = sqlite3.connect(dbfilename)
Example #17
0
    def __init__(self, estimators_config, folds=3, verbose=False):
        self.estimators_config = estimators_config
        self.folds = folds

        self.logger = create_logger(self, verbose)
Example #18
0
def run_scenario(scenario_id, args=None):
    
    logd = create_logger(appname='{} - {} - details'.format(args.app_name, scenario_id),
                         logfile=join(args.scenario_log_dir,'scenario_{}_details.txt'.format(scenario_id)),
                         msg_format='%(asctime)s - %(message)s')
    logp = create_logger(appname='{} - {} - progress'.format(args.app_name, scenario_id),
                         logfile=join(args.scenario_log_dir, 'scenario_{}_progress.txt'.format(scenario_id)),
                         msg_format='%(asctime)s - %(message)s')
    
    logd.info('starting scenario {}'.format(scenario_id))
    
    # get connection, along with useful tools attached
    conn = connection(args, scenario_id, args.template_id, logd)
    
    # time steps
    ti = datetime.strptime(args.initial_timestep, args.timestep_format)
    tf = datetime.strptime(args.final_timestep, args.timestep_format)
    dates = [date for date in rrule.rrule(rrule.MONTHLY, dtstart=ti, until=tf)]
    
    timestep_dict = OrderedDict()
    conn.OAtHPt = {}
    for date in dates:
        oat = date.strftime(args.timestep_format)
        hpt = date.strftime(args.hydra_timestep_format)
        timestep_dict[date] = [hpt, oat]
        conn.OAtHPt[oat] = hpt
        
    template_attributes = conn.call('get_template_attributes', {'template_id': conn.template.id})
    attr_names = {}
    for ta in template_attributes:
        attr_names[ta.id] = ta.name
        
    # create the model
    instance = prepare_model(model_name='OpenAgua',
                             network=conn.network,
                             template=conn.template,
                             attr_names=attr_names,
                             timestep_format=args.timestep_format,
                             timestep_dict=timestep_dict)
    
    logd.info('model created')
    opt = SolverFactory(args.solver)
    results = opt.solve(instance, tee=False)
    #logd.info('model solved')

    old_stdout = sys.stdout
    sys.stdout = summary = StringIO()
    results.write()
    sys.stdout = old_stdout
    
    logd.info('model solved\n' + summary.getvalue())
    
    if (results.solver.status == SolverStatus.ok) and (results.solver.termination_condition == TerminationCondition.optimal):
        # this is feasible and optimal
        logd.info('Optimal feasible solution found.')
        outputnames = {'S': 'storage', 'I': 'inflow', 'O': 'outflow'}
        #outputnames = {'I': 'inflow', 'O': 'outflow'}
        result = conn.save_results(instance, outputnames)
        logd.info('Results saved.')
    elif results.solver.termination_condition == TerminationCondition.infeasible:
        logd.info('WARNING! Problem is infeasible. Check detailed results.')
        # do something about it? or exit?
    else:
        # something else is wrong
        logd.info('WARNING! Something went wrong. Likely the model was not built correctly.')    
    
    # Still we will report that the model is complete...
    if args.foresight == 'perfect':
        msg = 'completed timesteps {} - {} | 1/1'.format(ti, tf)
        logd.info(msg)
        logp.info(msg)
    
    # ===========================
    # start the per timestep loop
    # ===========================
   
    #T = len(dates)
    #for t, date in enumerate(dates):
        
        # ===========================
        # prepare the time steps to use in the optimization run
        # ===========================        

        # ===========================
        # prepare the inflow forecast model
        # ===========================

        # For now, forecast based on mean monthly inflow at each catchment node
        # However, this can be changed in the future

        # ===========================
        # run the model
        # ===========================
        
        #if new_model:
            #model = create_model(data)
            #instance = model.create_instance()            
        #else:
            #instance = update_instance(instance, S0, inflow)
            #instance.preprocess()
            
        # solve the model
        #results = solver.solve(instance)
        
        # load the results
        #instance.solutions.load_from(results)
        
        # set initial conditions for the next time step
        #S0 = instance.S[isIDs[0]].value
        #if S0 is None:
            #S0 = 0.0
            
        # ===========================
        # save results to memory
        # ===========================
        
        
        #logd.info('completed timestep {} | {}/{}'.format(dt.date.strftime(date, args.timestep_format), t+1, T))
    
    # ===========================
    # save results to Hydra Server
    # ===========================
    
    
    return
Example #19
0
def main():
    global args, best_prec1, tb_writer

    args = parser.parse_args()
    tb_writer = create_logger(args)

    model = get_model(args.arch,
                      pretrained=args.pretrained,
                      num_classes=args.num_classes)

    model = torch.nn.DataParallel(model).cuda()

    # if args.quantize:
    #     global bin_op
    #     bin_op = quantization.Binarize(model)

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    param_grp_1 = []
    for m in model.modules():
        if isinstance(m, BinConv2d):
            param_grp_1.append(m.weight)
            param_grp_1.append(m.alpha)
            param_grp_1.append(m.beta)
            param_grp_1.append(m.gamma)
    param_grp_1_ids = list(map(id, param_grp_1))
    param_grp_2 = list(
        filter(lambda p: id(p) not in param_grp_1_ids, model.parameters()))

    if args.optimizer == "sgd":
        if args.quantize:
            optimizer = torch.optim.SGD([{
                'params': param_grp_1,
                'weight_decay': 0
            }, {
                'params': param_grp_2,
                'weight_decay': args.weight_decay
            }],
                                        lr=args.lr,
                                        momentum=args.momentum)
        else:
            optimizer = torch.optim.SGD(model.parameters(),
                                        args.lr,
                                        momentum=args.momentum,
                                        weight_decay=args.weight_decay)
    elif args.optimizer == "adam":
        if args.quantize:
            optimizer = torch.optim.Adam([{
                'params': param_grp_1,
                'weight_decay': 0
            }, {
                'params': param_grp_2,
                'weight_decay': args.weight_decay
            }],
                                         lr=args.lr)
        else:
            optimizer = torch.optim.Adam(model.parameters(),
                                         lr=args.lr,
                                         weight_decay=args.weight_decay)
    global scheduler
    scheduler = lr_scheduler.CosineAnnealingWarmRestarts(optimizer, 1, 2)
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    # Normalize takes first mean and then std
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_dataset = datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            #transforms.Resize(256),
            #transforms.RandomCrop(224),
            transforms.ColorJitter(brightness=0.4,
                                   contrast=0.4,
                                   saturation=0.4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))

    # train_dataset = RandomDataset(
    #     traindir,
    #     transforms.Compose([
    #         transforms.RandomResizedCrop(224),
    #         transforms.ToTensor(),
    #         normalize,
    #     ]))

    train_sampler = None

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=(train_sampler is None),
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               sampler=train_sampler)

    val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
        valdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)
    # val_loader = torch.utils.data.DataLoader(
    #     RandomDataset(valdir, transforms.Compose([
    #         transforms.Resize(256),
    #         transforms.CenterCrop(224),
    #         transforms.ToTensor(),
    #         normalize,
    #     ])),
    #     batch_size=args.batch_size, shuffle=False,
    #     num_workers=args.workers, pin_memory=True)

    if args.evaluate:
        validate(val_loader, model, criterion, args.start_epoch)
        return

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion, epoch)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            }, is_best)
Example #20
0
opt.TRAIN.STEPS_PER_EPOCH = 7000
opt.TRAIN.RESUME = None if len(sys.argv) == 1 else sys.argv[1]

opt.VALID = edict()

if opt.TRAIN.SEED is None:
    opt.TRAIN.SEED = int(time.time())

random.seed(opt.TRAIN.SEED)
torch.manual_seed(opt.TRAIN.SEED)
torch.cuda.manual_seed(opt.TRAIN.SEED)

if not osp.exists(opt.EXPERIMENT.DIR):
    os.makedirs(opt.EXPERIMENT.DIR)

logger = create_logger(opt.LOG.LOG_FILE)
logger.info('Options:')
logger.info(pprint.pformat(opt))

msg = f'Use time as random seed: {opt.TRAIN.SEED}'
logger.info(msg)

DATA_INFO = cfg.DATASET

# Data-loader of training set
transform_train = transforms.Compose([
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224,
                                                          0.225]),
])
Example #21
0
    parser = argparse.ArgumentParser()
    parser.add_argument("--weights", help="model to resume training", type=str)
    parser.add_argument("--predict",
                        help="model to resume training",
                        action='store_true')
    parser.add_argument("--fold", help="fold number", type=int, default=0)
    args = parser.parse_args()

    opt.EXPERIMENT.DIR = os.path.join(opt.EXPERIMENT.DIR, f'fold_{args.fold}')

    if not os.path.exists(opt.EXPERIMENT.DIR):
        os.makedirs(opt.EXPERIMENT.DIR)

    log_file = os.path.join(opt.EXPERIMENT.DIR, f'log_training.txt')
    logger = create_logger(log_file)
    logger.info('=' * 50)

    #print("available models:", pretrainedmodels.model_names)
    train_loader, val_loader, test_loader = load_data(args.fold)
    model = create_model(args.predict)
    # freeze_layers(model)

    # if torch.cuda.device_count() == 1:
    #     torchsummary.summary(model, (3, 224, 224))

    if opt.TRAIN.OPTIMIZER == 'Adam':
        optimizer = optim.Adam(model.parameters(), opt.TRAIN.LEARNING_RATE)
    elif opt.TRAIN.OPTIMIZER == 'SGD':
        optimizer = optim.SGD(model.parameters(),
                              opt.TRAIN.LEARNING_RATE,
Example #22
0
import base64
import gzip
import json
import os
import sys
from xml.sax import saxutils

import gearman

# pylint: disable=relative-import
import settings
from utils import create_logger, is_hex

os.umask(022)
LOGGER = create_logger('exportaccount')

SCHEMA_ROOT = 'https://www.libravatar.org/schemas/export/0.2'
SCHEMA_XSD = '%s/export.xsd' % SCHEMA_ROOT


def xml_header():
    return '''<?xml version="1.0" encoding="UTF-8"?>
<user xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
      xsi:schemaLocation="%s %s"
      xmlns="%s">\n''' % (SCHEMA_ROOT, SCHEMA_XSD, SCHEMA_ROOT)


def xml_footer():
    return '</user>\n'
Example #23
0
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.tensorboard import SummaryWriter
from torch import nn

from model.networks.inception_siamese_network import (
    get_pretrained_iv3_transforms,
    InceptionSiameseNetwork,
)
from model.networks.light_siamese_network import (
    get_light_siamese_transforms,
    LightSiameseNetwork,
)
from utils import create_logger, readable_float, dynamic_report_key
from model.evaluate import RollingEval

logger = create_logger(__name__)


class QuasiSiameseNetwork(object):
    def __init__(self, args):
        input_size = (args.input_size, args.input_size)

        self.run_name = args.run_name
        self.input_size = input_size
        self.lr = args.learning_rate
        self.output_type = args.output_type

        network_architecture_class = InceptionSiameseNetwork
        network_architecture_transforms = get_pretrained_iv3_transforms
        if args.model_type == "light":
            network_architecture_class = LightSiameseNetwork
Example #24
0
     gencount=0
     gencounttotal=0
     poscount=0 

     #Get default values
     source_dir = config.source_dir_oxford
     source_file_type = config.source_file_type_oxford
     destination_dir = config.destination_dir_oxford
     destination_file_type = config.destination_file_type_oxford
     sample_file = config.sample_file_oxford
     sample_file_format = config.sample_file_format_oxford
     numthreads = multiprocessing.cpu_count()

          
     #Pass the script name to Log
     logger = utils.create_logger("gentotsv")
     
     start_time = time.time()
     print "Start time: "+time.ctime()
     utils.log(logger, "Start time: "+time.ctime())



     for opt,arg in opts:

           if opt=='-h':
              help = 1
              script_usage()


           elif opt=='-t':
Example #25
0
    parser.add_argument(
        "-f", "--overwrite", 
        action='store_true',
        help="overwrite a file if it exists",
    )
    parser.add_argument(
        "-t", "--timeout", 
        type=int,
        default=None,
        help="? seconds per file time limit (timeout)",
    )
    return parser


if __name__ == '__main__':
    log = utils.create_logger()
    parser = create_parser()
    args = parser.parse_args()

    if isinstance(args.input_allc_files, list) and len(args.input_allc_files) > 0:
        input_allc_files = args.input_allc_files
    elif isinstance(args.input_allc_files_txt, str) and len(args.input_allc_files_txt) > 0:
        input_allc_files = utils.import_single_textcol(args.input_allc_files_txt)
    else:
        raise ValueError("no input files")
        
    input_bed_file = args.input_bed_file
    output_prefix = args.output_prefix

    bed_file_name_column = args.bed_file_name_column
    contexts = args.contexts
Example #26
0
# along with Libravatar.  If not, see <http://www.gnu.org/licenses/>.

import json
import os
import subprocess
import sys

import gearman
import Image

# pylint: disable=bare-except,relative-import
import settings
from utils import create_logger, delete_if_exists, is_hex, is_hash_pair

os.umask(022)
LOGGER = create_logger('cropresize')

MAX_PIXELS = 7000


def create_broken_image(broken, dest):
    delete_if_exists(dest)

    os.symlink(broken, dest)


def pil_format_to_ext(pil_format):
    if pil_format == 'PNG':
        return '.png'
    elif pil_format == 'JPEG':
        return '.jpg'
Example #27
0
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Libravatar.  If not, see <http://www.gnu.org/licenses/>.

from gearman import libgearman
import json
import os
import shutil
import sys

# pylint: disable=W0403
import settings
from utils import create_logger, delete_if_exists, is_hex, is_hash_pair

logger = create_logger('ready2user')


def main(argv=None):
    if argv is None:
        argv = sys.argv

    gearman_workload = sys.stdin.read()
    params = json.loads(gearman_workload)

    file_hash = params['file_hash']
    file_format = params['format']
    links = params['links']

    # Validate inputs
    if not is_hex(file_hash):
Example #28
0
# You should have received a copy of the GNU Affero General Public License
# along with Libravatar.  If not, see <http://www.gnu.org/licenses/>.

import json
import os
import shutil
import sys

import gearman

# pylint: disable=bare-except,relative-import
import settings
from utils import create_logger, delete_if_exists, is_hex, is_hash_pair

os.umask(022)
LOGGER = create_logger('ready2user')


def main(argv=None):
    if argv is None:
        argv = sys.argv

    gearman_workload = sys.stdin.read()
    params = json.loads(gearman_workload)

    file_hash = params['file_hash']
    file_format = params['format']
    links = params['links']

    # Validate inputs
    if not is_hex(file_hash):
Example #29
0
import json
import requests

import utils

LOG = utils.create_logger(__name__)


class Client(object):

    def __init__(self, endpoint, headers=None, timeout=3, retries=5):
        self.retries = retries
        self.endpoint = endpoint
        self.timeout = timeout
        self.headers = headers or {}

    def _inject_default_request_args(self, *args, **kwargs):
        if 'timeout' not in kwargs:
            kwargs['timeout'] = self.timeout
        headers = dict(self.headers)
        headers.update(kwargs.get('headers', {}))
        kwargs['headers'] = headers
        return args, kwargs

    def _do_request(self, f):
        for _ in range(self.retries):
            try:
                return f()
            except requests.Timeout as e:
                LOG.warning(str(e))
        raise Exception("Request timed out after %s retries" % self.retries)
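
A short usage sketch for the Client above; the URL is made up and only the methods shown in the snippet are used:

client = Client('https://example.invalid/api', headers={'Accept': 'application/json'})
args, kwargs = client._inject_default_request_args('https://example.invalid/api/items',
                                                   headers={'X-Trace': '1'})
# _do_request retries the callable on requests.Timeout up to `retries` times
resp = client._do_request(lambda: requests.get(*args, **kwargs))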
Example #30
0

 
# Constants
# utils.NEWLINE_REPLACEMENT = " " # Can be uncommented and edited
# utils.SUBARRAY_SEPARATOR = ";" # Can be uncommented and edited



if len(sys.argv) != 7:
     print "Passed arguments were: " + str(sys.argv)
     print "Arguments must be: export.py outputDir dbHost dbName dbUser dbPassword importGenotypeFlag"
     exit(0)


logger = utils.create_logger("export")

output_dir = "./" + sys.argv[1]
print output_dir
output_dir = output_dir if output_dir[-1] == "/" else output_dir + "/"
connection_string = "dbname={1} host={0} user={2} password={3}".format(*sys.argv[2:])

importGenotypesFlag = None
sourcesDirectory = None


importGenotypesFlag = sys.argv[6]
if importGenotypesFlag == "Y":
  sourcesDirectory = config.source_converted_oxford
  file_type_oxford = config.destination_file_type_oxford
Example #31
0
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Libravatar.  If not, see <http://www.gnu.org/licenses/>.

import json
import os
import sys

# pylint: disable=relative-import
import settings
from utils import create_logger, delete_if_exists, is_hex

os.umask(022)
LOGGER = create_logger('deletephoto')


def main(argv=None):
    if argv is None:
        argv = sys.argv

    gearman_workload = sys.stdin.read()
    params = json.loads(gearman_workload)

    file_hash = params['file_hash']
    file_format = params['format']

    # Validate inputs
    if not is_hex(file_hash):
        LOGGER.error('file_hash is not a hexadecimal value')
Example #32
0
#!/usr/bin/env python
import os
import logging
import sys
sys.path.append("../shared/")
import utils
log = utils.create_logger(__file__)
# #log.setLevel(logging.DEBUG)
log.setLevel(logging.INFO)

# naive (not in place, memory intensive)
# def quick_sort(array):
#     if len(array) < 2:
#         return array
#     pivot = array[0]
#     array.remove(pivot)
#     less = []
#     greater = []
#     for item in array:
#         if item <= pivot:
#             less.append(item)
#         else:
#             greater.append(item)
#     return quick_sort(less) + [pivot] + quick_sort(greater)

# in-place (memory efficient)
def quick_sort(array, left, right):
    global __cmp_count__
    # empty or single item list
    if array and len(array) > 1 and left < right:
        log.debug("%s (size=%d)" % (str(array), len(array)))
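
The in-place version above is truncated; here is a self-contained completion sketch using the Lomuto partition scheme (an assumption about the rest of the function, not the original code):

__cmp_count__ = 0

def quick_sort_sketch(array, left, right):
    """Hedged sketch: in-place quicksort with a Lomuto partition."""
    global __cmp_count__
    if left < right:
        pivot = array[right]
        i = left - 1
        for j in range(left, right):
            __cmp_count__ += 1          # tally comparisons, as the original appears to
            if array[j] <= pivot:
                i += 1
                array[i], array[j] = array[j], array[i]
        array[i + 1], array[right] = array[right], array[i + 1]
        quick_sort_sketch(array, left, i)
        quick_sort_sketch(array, i + 2, right)

# data = [5, 2, 9, 1, 5, 6]; quick_sort_sketch(data, 0, len(data) - 1)  # data -> [1, 2, 5, 5, 6, 9]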
Example #33
0
import os, pickle, math, random
import numpy as np
import pandas as pd
from scipy import stats
from sklearn.cluster import KMeans
from scipy.spatial import distance
from utils import create_dirpath, create_logger, ms_since_1970
from tensorflow.keras.models import load_model
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
from sklearn.decomposition import PCA
from sklearn.metrics.pairwise import euclidean_distances

default_outdir = './logs/clustering'
logger = create_logger('clustering', logdir=default_outdir)

init_centroid_choices = ('mean', 'rand', 'first', 'kmeans++')


class LabelGuidedKMeans:
    '''
    Class representing a set of label-guided k-means regions

    Properties
        regions      : list of all LabelGuidedKMeansRegion objects
        categories   : 1D or 2D array of unique categories
        n_categories : number of unique categories (labels)
    
    Functions
        fit            : generates regions from the input data (performs clustering)
        predict        : finds a matching region for a given x and y
Example #34
0
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Libravatar.  If not, see <http://www.gnu.org/licenses/>.

import Image
import json
import os
import sys

# pylint: disable=W0403
import settings
from utils import create_logger, is_hex

logger = create_logger('resizeavatar')


def resize_image(email_hash, size):
    original_filename = settings.AVATAR_ROOT + email_hash

    output_dir = settings.AVATAR_ROOT + '/%s' % size
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    resized_filename = '%s/%s' % (output_dir, email_hash)

    # Save resized image to disk
    original_img = Image.open(original_filename)
    resized_img = original_img.resize((size, size), Image.ANTIALIAS)
    resized_img.save(resized_filename, original_img.format, quality=settings.JPEG_QUALITY)
Example #35
0
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="Multi-GPU - Local rank")
    parser.add_argument("--raw_src",
                        type=str,
                        default=None,
                        help="Tokenized source train file")
    parser.add_argument("--raw_tgt",
                        type=str,
                        default=None,
                        help="Tokenized target train file")
    parser.add_argument("--continue_path",
                        type=str,
                        default=None,
                        help="Where to reload checkpoint")
    parser.add_argument("--dump_path",
                        type=str,
                        default=None,
                        help="Where to store checkpoints")
    params = parser.parse_args()

    if params.raw_src is not None:
        config.SRC_RAW_TRAIN_PATH = params.raw_src
    if params.raw_tgt is not None:
        config.TGT_RAW_TRAIN_PATH = params.raw_tgt
    if params.continue_path is not None:
        config.continue_path = params.continue_path
    if params.dump_path is not None:
        config.dump_path = params.dump_path

    # Initialize distributed training
    if params.local_rank != -1:
        torch.cuda.set_device(params.local_rank)
        torch.distributed.init_process_group(backend="nccl",
                                             init_method='env://')
    trainer = Enc_Dec_Trainer(params)

    # Check whether dump_path exists, if not create one
    if params.local_rank == 0 or not config.multi_gpu:
        if not os.path.exists(config.dump_path):
            os.makedirs(config.dump_path)

        # Save config in dump_path
        f = open(os.path.join(config.dump_path, "config.pkl"), 'wb')
        pickle.dump(config, f)
        f.close()
    torch.distributed.barrier()

    # Create logger for each process
    logger = create_logger(os.path.join(config.dump_path, 'train.log'),
                           rank=getattr(params, 'local_rank', 0))

    # Start epoch training
    for i_epoch in range(trainer.epoch_size):
        if trainer.epoch > trainer.epoch_size:
            break

        if not config.multi_gpu or int(os.environ["NGPUS"]) == 1:
            # Single GPU, do not need to split dataset
            data_iter = iter(trainer.iterators["train"].get_iterator(
                True, True))
        else:
            if params.local_rank == 0:
                if not os.path.exists(config.data_bin):
                    os.makedirs(config.data_bin)

                # Split dataset into NGPUS subsets, with the same number of batches
                # Store NGPUS subsets in config.data_bin
                subset_batches = trainer.iterators["train"].get_batch_ids(
                    shuffle=True,
                    group_by_size=True,
                    num_subsets=int(os.environ["NGPUS"]))

                for i_sub in range(len(subset_batches)):
                    f = open(
                        os.path.join(config.data_bin, "batches_" + str(i_sub)),
                        'wb')
                    pickle.dump(subset_batches[i_sub], f)
                    f.close()

            torch.distributed.barrier()
            # Each process reads its own subset
            f = open(
                os.path.join(config.data_bin,
                             "batches_" + str(params.local_rank)), 'rb')
            subset_batches = pickle.load(f)
            f.close()
            data_iter = iter(trainer.iterators["train"].get_batches_iterator(
                subset_batches))
            num_train = sum([len(b) for b in subset_batches])
            trainer.num_train = num_train

        for i_batch, raw_batch in enumerate(data_iter):
            try:
                trainer.train_step(raw_batch)
                trainer.iter()
            except RuntimeError:
                continue

        scores = trainer.valid_step()
        trainer.save_best_model(scores)
        trainer.save_periodic()
        trainer.end_epoch(scores)
        torch.distributed.barrier()
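
The rank-0 branch above writes one pickle of batch ids per GPU so that every process can read back its own subset; below is a stand-alone sketch of that split-and-dump step (split_batches and dump_subsets are illustrative names, not the trainer's actual helpers):

import os
import pickle

def split_batches(batch_ids, num_subsets):
    """Round-robin split so every subset holds the same number of batches."""
    usable = len(batch_ids) - len(batch_ids) % num_subsets  # drop the remainder
    return [batch_ids[i:usable:num_subsets] for i in range(num_subsets)]

def dump_subsets(subsets, data_bin):
    os.makedirs(data_bin, exist_ok=True)
    for i_sub, subset in enumerate(subsets):
        with open(os.path.join(data_bin, "batches_" + str(i_sub)), "wb") as f:
            pickle.dump(subset, f)

# split_batches(list(range(10)), 4) -> [[0, 4], [1, 5], [2, 6], [3, 7]]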
Example #36
0
parser.add_argument('--print_freq', default=10, type=int)

args = parser.parse_args()

use_cuda = torch.cuda.is_available()

best_acc = 0  # best test accuracy
start_epoch = 0  # start from epoch 0 or last checkpoint epoch

torch.manual_seed(args.seed)

args.log_dir = args.log_dir + '_' + time.asctime(time.localtime(
    time.time())).replace(" ", "-")
os.makedirs('results/{}'.format(args.log_dir), exist_ok=True)
logger = create_logger('global_logger',
                       "results/{}/log.txt".format(args.log_dir))

wandb.init(
    project="dual_bn_v2",
    dir="results/{}".format(args.log_dir),
    name=args.log_dir,
)
wandb.config.update(args)

# Data
logger.info('==> Preparing data..')
if args.augment:
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
Example #37
0
    def __init__(self, args):

        # get rank
        self.world_size = dist.get_world_size()
        self.rank = dist.get_rank()

        if self.rank == 0:
            # mkdir path
            if not os.path.exists('{}/events'.format(args.exp_path)):
                os.makedirs('{}/events'.format(args.exp_path))
            if not os.path.exists('{}/images'.format(args.exp_path)):
                os.makedirs('{}/images'.format(args.exp_path))
            if not os.path.exists('{}/logs'.format(args.exp_path)):
                os.makedirs('{}/logs'.format(args.exp_path))
            if not os.path.exists('{}/checkpoints'.format(args.exp_path)):
                os.makedirs('{}/checkpoints'.format(args.exp_path))
    
            # logger
            if args.trainer['tensorboard'] and not args.extract:
                try:
                    from tensorboardX import SummaryWriter
                except ImportError:
                    raise Exception("Please switch off \"tensorboard\" in your config file if you do not want to use it, otherwise install it.")
                self.tb_logger = SummaryWriter('{}/events'.format(args.exp_path))
            else:
                self.tb_logger = None
            if args.validate:
                self.logger = utils.create_logger('global_logger', '{}/logs/log_offline_val.txt'.format(args.exp_path))
            elif args.extract:
                self.logger = utils.create_logger('global_logger', '{}/logs/log_extract.txt'.format(args.exp_path))
            else:
                self.logger = utils.create_logger('global_logger', '{}/logs/log_train.txt'.format(args.exp_path))
        
        # create model
        self.model = models.__dict__[args.model['arch']](args.model, dist_model=True)
    
        # optionally resume from a checkpoint
        assert not (args.load_iter is not None and args.load_path is not None)
        if args.load_iter is not None:
            self.model.load_state("{}/checkpoints".format(args.exp_path), args.load_iter, args.resume)
            self.start_iter = args.load_iter
        else:
            self.start_iter = 0
        if args.load_path is not None:
            self.model.load_pretrain(args.load_path)
        self.curr_step = self.start_iter

        # lr scheduler
        if not (args.validate or args.extract): # train
            self.lr_scheduler = utils.StepLRScheduler(self.model.optim, args.model['lr_steps'],
                args.model['lr_mults'], args.model['lr'], args.model['warmup_lr'],
                args.model['warmup_steps'], last_iter=self.start_iter-1)

        # Data loader
        if args.data['memcached']:
            from dataset_mc import McImageFlowDataset, McImageDataset
            imageflow_dataset = McImageFlowDataset
            image_dataset = McImageDataset
        else:
            from dataset import ImageFlowDataset, ImageDataset
            imageflow_dataset = ImageFlowDataset
            image_dataset = ImageDataset

        if not (args.validate or args.extract): # train
            train_dataset = imageflow_dataset(args.data['train_source'], args.data, 'train')
            train_sampler = utils.DistributedGivenIterationSampler(
                            train_dataset, args.model['total_iter'],
                            args.data['batch_size'], last_iter=self.start_iter-1)
            self.train_loader = DataLoader(
                train_dataset, batch_size=args.data['batch_size'], shuffle=False,
                num_workers=args.data['workers'], pin_memory=False, sampler=train_sampler)

        if not args.extract: # train or offline validation
            val_dataset = imageflow_dataset(args.data['val_source'], args.data, 'val')
            val_sampler = utils.DistributedSequentialSampler(val_dataset)
            self.val_loader = DataLoader(
                val_dataset, batch_size=args.data['batch_size_test'], shuffle=False,
                num_workers=args.data['workers'], pin_memory=False, sampler=val_sampler)
        else: # extract
            extract_dataset = image_dataset(args.extract_source, args.data)
            self.extract_metas = extract_dataset.metas
            extract_sampler = utils.DistributedSequentialSampler(extract_dataset)
            self.extract_loader = DataLoader(
                extract_dataset, batch_size=1, shuffle=False,
                num_workers=1, pin_memory=False, sampler=extract_sampler)

        self.args = args
Example #38
0
 def __init__(self, bot):
     self.bot: MyBot = bot
     self.logger = utils.create_logger(
         self.__class__.__name__, logging.INFO)
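
In this example create_logger is called with a logger name and a logging level instead of a file path. A minimal console-only sketch with that signature (an assumption, not the bot project's actual helper):

import logging

def create_logger(name, level=logging.INFO):
    # named console logger; guard against duplicate handlers on repeated calls
    logger = logging.getLogger(name)
    logger.setLevel(level)
    if not logger.handlers:
        handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter(
            '%(asctime)s [%(name)s] %(levelname)s: %(message)s'))
        logger.addHandler(handler)
    return logger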
Example #39
0
    a = Autores(cnx, db_mongo)
    a.procesar()

    idiomas = json.load(open('idiomas.json'))
    langs = [d.get('lang') for d in idiomas]
    for l in langs:
        logger.info('Processing words --> %s', l)
        im = Imagenes(cnx, db_mongo, l)
        im.inserta_palabras()
        logger.info('Processing images --> %s', l)
        im.inserta_imagenes()


if __name__ == '__main__':
    logger = create_logger()
    # for debug:
    address = ('0.0.0.0', 5000)
    ptvsd.enable_attach(address)
    print("attaching")
    ptvsd.wait_for_attach()
    # if running dockerized, wait for MySQL to come up
    time.sleep(5)
    load_dotenv('.env')

    print("attached")
    breakpoint()
    print("attached2")

    # create a JSON with singulars (svgs). Uncomment to use
    svgs = os.getenv('FOLDER_SVGS')
Example #40
0
from gevent import spawn, socket
from logging import INFO
from utils import create_logger
from config import VPNServerConfig
from net import VPNClientConnection

import traceback

server_logger = create_logger(name="PyVPN Server Logger", file="./server.log", level=INFO)

class VPNServer(object):
    args_config = []

    def __init__(self, **kwargs):
        self.logger = server_logger
        self.connections = {}
        self.address_pool = {}

        self.config = VPNServerConfig(path_to_config="./server.conf")

        overrides = dict((k, v) for k, v in kwargs.items()
                         if k in VPNServer.args_config)
        self.config.update(overrides)

    def handle_connection(self, conn, addr):
        client_connection = None
        try:
            client_connection = VPNClientConnection(conn, self)
        except Exception as e: # handle case if auth error or something failed
            self.logger.error(e)
            self.logger.error(traceback.format_exc())
Example #41
0
def main():
    global args, config, X

    args = parser.parse_args()
    print(args)

    with open(args.config) as f:
        config = EasyDict(yaml.safe_load(f))

    config.save_path = os.path.dirname(args.config)

    # regular set up
    assert torch.cuda.is_available()
    device = torch.device("cuda")
    config.device = device

    # random seed setup
    print("Random Seed: ", config.seed)
    random.seed(config.seed)
    torch.manual_seed(config.seed)
    torch.cuda.manual_seed(config.seed)
    cudnn.benchmark = True

    # regular set up end

    netG = torch.nn.DataParallel(NetG(ngf=config.ngf))
    netD = torch.nn.DataParallel(NetD(ndf=config.ndf))

    netF = torch.nn.DataParallel(NetF())
    netI = torch.nn.DataParallel(NetI()).eval()
    for param in netF.parameters():
        param.requires_grad = False

    criterion_MSE = nn.MSELoss()

    fixed_sketch = torch.tensor(0, device=device).float()
    fixed_hint = torch.tensor(0, device=device).float()
    fixed_sketch_feat = torch.tensor(0, device=device).float()

    ####################
    netD = netD.to(device)
    netG = netG.to(device)
    netF = netF.to(device)
    netI = netI.to(device)
    criterion_MSE = criterion_MSE.to(device)

    # setup optimizer

    optimizerG = optim.Adam(netG.parameters(),
                            lr=config.lr_scheduler.base_lr,
                            betas=(0.5, 0.9))
    optimizerD = optim.Adam(netD.parameters(),
                            lr=config.lr_scheduler.base_lr,
                            betas=(0.5, 0.9))

    last_iter = -1
    best_fid = 1e6

    if args.resume:
        best_fid, last_iter = load_state(args.resume, netG, netD, optimizerG,
                                         optimizerD)

    config.lr_scheduler['last_iter'] = last_iter

    config.lr_scheduler['optimizer'] = optimizerG
    lr_schedulerG = get_scheduler(config.lr_scheduler)
    config.lr_scheduler['optimizer'] = optimizerD
    lr_schedulerD = get_scheduler(config.lr_scheduler)

    tb_logger = SummaryWriter(config.save_path + '/events')
    logger = create_logger('global_logger', config.save_path + '/log.txt')
    logger.info(f'args: {pprint.pformat(args)}')
    logger.info(f'config: {pprint.pformat(config)}')

    batch_time = AverageMeter(config.print_freq)
    data_time = AverageMeter(config.print_freq)
    flag = 1
    mu, sigma = 1, 0.005
    X = stats.truncnorm((0 - mu) / sigma, (1 - mu) / sigma,
                        loc=mu,
                        scale=sigma)
    i = 0
    curr_iter = last_iter + 1

    dataloader = train_loader(config)
    data_iter = iter(dataloader)

    end = time.time()
    while i < len(dataloader):
        lr_schedulerG.step(curr_iter)
        lr_schedulerD.step(curr_iter)
        current_lr = lr_schedulerG.get_lr()[0]
        ############################
        # (1) Update D network
        ###########################
        for p in netD.parameters():  # reset requires_grad
            p.requires_grad = True  # they are set to False below in netG update
        for p in netG.parameters():
            p.requires_grad = False  # to avoid computation ft_params

        # train the discriminator Diters times
        j = 0
        while j < config.diters:
            netD.zero_grad()

            i += 1
            j += 1

            data_end = time.time()
            real_cim, real_vim, real_sim = next(data_iter)
            data_time.update(time.time() - data_end)

            real_cim, real_vim, real_sim = real_cim.to(device), real_vim.to(
                device), real_sim.to(device)
            mask = mask_gen()
            hint = torch.cat((real_vim * mask, mask), 1)

            # train with fake
            with torch.no_grad():
                feat_sim = netI(real_sim).detach()
                fake_cim = netG(real_sim, hint, feat_sim).detach()

            errD_fake = netD(fake_cim, feat_sim)
            errD_fake = errD_fake.mean(0).view(1)

            errD_fake.backward(retain_graph=True)  # backward on score on real

            errD_real = netD(real_cim, feat_sim)
            errD_real = errD_real.mean(0).view(1)
            errD = errD_real - errD_fake

            errD_realer = -1 * errD_real + errD_real.pow(2) * config.drift

            # backward on score on real
            errD_realer.backward(retain_graph=True)

            gradient_penalty = calc_gradient_penalty(netD, real_cim, fake_cim,
                                                     feat_sim)
            gradient_penalty.backward()

            optimizerD.step()

        ############################
        # (2) Update G network
        ############################

        for p in netD.parameters():
            p.requires_grad = False  # to avoid computation
        for p in netG.parameters():
            p.requires_grad = True
        netG.zero_grad()

        data = next(data_iter)
        real_cim, real_vim, real_sim = data
        i += 1

        real_cim, real_vim, real_sim = real_cim.to(device), real_vim.to(
            device), real_sim.to(device)

        if flag:  # fix samples
            mask = mask_gen()
            hint = torch.cat((real_vim * mask, mask), 1)
            with torch.no_grad():
                feat_sim = netI(real_sim).detach()

            tb_logger.add_image(
                'target imgs',
                vutils.make_grid(real_cim.mul(0.5).add(0.5), nrow=4))
            tb_logger.add_image(
                'sketch imgs',
                vutils.make_grid(real_sim.mul(0.5).add(0.5), nrow=4))
            tb_logger.add_image(
                'hint',
                vutils.make_grid((real_vim * mask).mul(0.5).add(0.5), nrow=4))

            fixed_sketch.resize_as_(real_sim).copy_(real_sim)
            fixed_hint.resize_as_(hint).copy_(hint)
            fixed_sketch_feat.resize_as_(feat_sim).copy_(feat_sim)

            flag -= 1

        mask = mask_gen()
        hint = torch.cat((real_vim * mask, mask), 1)

        with torch.no_grad():
            feat_sim = netI(real_sim).detach()

        fake = netG(real_sim, hint, feat_sim)

        save_training_images(real_sim[0], hint[0], fake[0], real_cim[0], i)

        errd = netD(fake, feat_sim)
        errG = errd.mean() * config.advW * -1
        errG.backward(retain_graph=True)
        feat1 = netF(fake)
        with torch.no_grad():
            feat2 = netF(real_cim)

        contentLoss = criterion_MSE(feat1, feat2)
        contentLoss.backward()

        optimizerG.step()
        batch_time.update(time.time() - end)

        ############################
        # (3) Report & 100 Batch checkpoint
        ############################
        curr_iter += 1

        if curr_iter % config.print_freq == 0:
            tb_logger.add_scalar('VGG MSE Loss', contentLoss.item(), curr_iter)
            tb_logger.add_scalar('wasserstein distance', errD.item(),
                                 curr_iter)
            tb_logger.add_scalar('errD_real', errD_real.item(), curr_iter)
            tb_logger.add_scalar('errD_fake', errD_fake.item(), curr_iter)
            tb_logger.add_scalar('Gnet loss toward real', errG.item(),
                                 curr_iter)
            tb_logger.add_scalar('gradient_penalty', gradient_penalty.item(),
                                 curr_iter)
            tb_logger.add_scalar('lr', current_lr, curr_iter)
            logger.info(
                f'Iter: [{curr_iter}/{len(dataloader)//(config.diters+1)}]\t'
                f'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                f'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                f'errG {errG.item():.4f}\t'
                f'errD {errD.item():.4f}\t'
                f'err_D_real {errD_real.item():.4f}\t'
                f'err_D_fake {errD_fake.item():.4f}\t'
                f'content loss {contentLoss.item():.4f}\t'
                f'LR {current_lr:.4f}')

        if curr_iter % config.print_img_freq == 0:
            with torch.no_grad():
                fake = netG(fixed_sketch, fixed_hint, fixed_sketch_feat)
                tb_logger.add_image(
                    'colored imgs',
                    vutils.make_grid(fake.detach().mul(0.5).add(0.5), nrow=4),
                    curr_iter)

        if curr_iter % config.val_freq == 0:
            fid, var = validate(netG, netI)
            tb_logger.add_scalar('fid_val', fid, curr_iter)
            tb_logger.add_scalar('fid_variance', var, curr_iter)
            logger.info(f'fid: {fid:.3f} ({var})\t')

            # remember best fid and save checkpoint
            is_best = fid < best_fid
            best_fid = min(fid, best_fid)
            save_checkpoint(
                {
                    'step': curr_iter - 1,
                    'state_dictG': netG.state_dict(),
                    'state_dictD': netD.state_dict(),
                    'best_fid': best_fid,
                    'optimizerG': optimizerG.state_dict(),
                    'optimizerD': optimizerD.state_dict(),
                }, is_best, config.save_path + '/ckpt')

        end = time.time()
Example #42
0
 def __init__(self, bot):
     self.bot = bot
     self.logger = utils.create_logger(self)
Example #43
0
    return -best_score

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', help='model to resume training', type=str)
    parser.add_argument('--fold', help='fold number', type=int, default=0)
    parser.add_argument('--predict', help='model to resume training', action='store_true')
    args = parser.parse_args()

    opt.EXPERIMENT_DIR = os.path.join(opt.EXPERIMENT_DIR, f'fold_{args.fold}')

    if not os.path.exists(opt.EXPERIMENT_DIR):
        os.makedirs(opt.EXPERIMENT_DIR)

    hyperopt_log_file = os.path.join(opt.EXPERIMENT_DIR, 'log_hyperopt.txt')
    hyperopt_logger = create_logger(hyperopt_log_file, onscreen=False)
    logger = create_logger(filename=None)

    '''
    if int(params['vflip']):
    if int(params['rotate90']):
    if params['affine'] == 'soft':
    elif params['affine'] == 'medium':
    elif params['affine'] == 'hard':

    if float(params['noise']) > 0.1:
    if float(params['blur']) > 0.1:
    if float(params['distortion']) > 0.1:
    if float(params['color']) > 0.1:
    '''
Example #44
0
def main():
    global args, config, best_prec1
    args = parser.parse_args()

    with open(args.config) as f:
        config = yaml.safe_load(f)

    config = EasyDict(config['common'])
    config.save_path = os.path.dirname(args.config)

    rank, world_size = dist_init()
    # print (args.Tmin, args.Tmax)

    # create model
    bn_group_size = config.model.kwargs.bn_group_size
    bn_var_mode = config.model.kwargs.get('bn_var_mode', 'L2')
    if bn_group_size == 1:
        bn_group = None
    else:
        assert world_size % bn_group_size == 0
        bn_group = simple_group_split(world_size, rank,
                                      world_size // bn_group_size)

    config.model.kwargs.bn_group = bn_group
    config.model.kwargs.bn_var_mode = (link.syncbnVarMode_t.L1 if bn_var_mode
                                       == 'L1' else link.syncbnVarMode_t.L2)
    model = model_entry(config.model)

    model.cuda()

    if config.optimizer.type == 'FP16SGD' or config.optimizer.type == 'FusedFP16SGD':
        args.fp16 = True
    else:
        args.fp16 = False

    if args.fp16:
        # if you have modules that must use fp32 parameters, and need fp32 input
        # try use link.fp16.register_float_module(your_module)
        # if you only need fp32 parameters set cast_args=False when call this
        # function, then call link.fp16.init() before call model.half()
        if config.optimizer.get('fp16_normal_bn', False):
            print('using normal bn for fp16')
            link.fp16.register_float_module(link.nn.SyncBatchNorm2d,
                                            cast_args=False)
            link.fp16.register_float_module(torch.nn.BatchNorm2d,
                                            cast_args=False)
            link.fp16.init()
        model.half()

    model = DistModule(model, args.sync)

    # create optimizer
    opt_config = config.optimizer
    opt_config.kwargs.lr = config.lr_scheduler.base_lr
    if config.get('no_wd', False):
        param_group, type2num = param_group_no_wd(model)
        opt_config.kwargs.params = param_group
    else:
        opt_config.kwargs.params = model.parameters()

    optimizer = optim_entry(opt_config)

    # optionally resume from a checkpoint
    last_iter = -1
    best_prec1 = 0
    if args.load_path:
        if args.recover:
            best_prec1, last_iter = load_state(args.load_path,
                                               model,
                                               optimizer=optimizer)
        else:
            load_state(args.load_path, model)

    cudnn.benchmark = True

    # Data loading code
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    # augmentation
    aug = [
        transforms.RandomResizedCrop(config.augmentation.input_size),
        transforms.RandomHorizontalFlip()
    ]

    for k in config.augmentation.keys():
        assert k in [
            'input_size', 'test_resize', 'rotation', 'colorjitter', 'colorold'
        ]
    rotation = config.augmentation.get('rotation', 0)
    colorjitter = config.augmentation.get('colorjitter', None)
    colorold = config.augmentation.get('colorold', False)

    if rotation > 0:
        aug.append(transforms.RandomRotation(rotation))

    if colorjitter is not None:
        aug.append(transforms.ColorJitter(*colorjitter))

    aug.append(transforms.ToTensor())

    if colorold:
        aug.append(ColorAugmentation())

    aug.append(normalize)

    # train
    train_dataset = McDataset(config.train_root,
                              config.train_source,
                              transforms.Compose(aug),
                              fake=args.fake)

    # val
    val_dataset = McDataset(
        config.val_root, config.val_source,
        transforms.Compose([
            transforms.Resize(config.augmentation.test_resize),
            transforms.CenterCrop(config.augmentation.input_size),
            transforms.ToTensor(),
            normalize,
        ]), args.fake)

    train_sampler = DistributedGivenIterationSampler(
        train_dataset,
        config.lr_scheduler.max_iter,
        config.batch_size,
        last_iter=last_iter)
    val_sampler = DistributedSampler(val_dataset, round_up=False)

    train_loader = DataLoader(train_dataset,
                              batch_size=config.batch_size,
                              shuffle=False,
                              num_workers=config.workers,
                              pin_memory=True,
                              sampler=train_sampler,
                              drop_last=True)

    val_loader = DataLoader(val_dataset,
                            batch_size=config.batch_size,
                            shuffle=False,
                            num_workers=config.workers,
                            pin_memory=True,
                            sampler=val_sampler,
                            drop_last=True)

    config.lr_scheduler['optimizer'] = optimizer.optimizer if isinstance(
        optimizer, FP16SGD) else optimizer
    config.lr_scheduler['last_iter'] = last_iter
    lr_scheduler = get_scheduler(config.lr_scheduler)

    if rank == 0:
        tb_logger = SummaryWriter(config.save_path + '/events')
        logger = create_logger('global_logger', config.save_path + '/log.txt')
        logger.info('args: {}'.format(pprint.pformat(args)))
        logger.info('config: {}'.format(pprint.pformat(config)))
    else:
        tb_logger = None

    if args.evaluate:
        if args.fusion_list is not None:
            validate(val_loader,
                     model,
                     fusion_list=args.fusion_list,
                     fuse_prob=args.fuse_prob)
        else:
            validate(val_loader, model)
        link.finalize()
        return

    train(train_loader, val_loader, model, optimizer, lr_scheduler,
          last_iter + 1, tb_logger)

    link.finalize()
Example #45
0
import os


def print_and_log(string, logger):
    print(string)
    if logger:
        logger.info(string)


cur_time = time.strftime('%Y-%m-%d-%H-%M')
if not os.path.exists('output/' + model + '-' + cur_time):
    exp_name = cur_time
    out_path = 'output/' + model + '-' + cur_time
    os.makedirs(out_path)
    logger = create_logger('./{}/logs'.format(out_path), exp_name)
    print_and_log('Creating folder: {}'.format(out_path), logger)

import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
#config.gpu_options.per_process_gpu_memory_fraction = 0.2
config.gpu_options.allow_growth = True
set_session(tf.Session(config=config))

f = open('./data/input_geo.pkl', 'rb')
data = pickle.load(f)
f.close()

train, test = train_test_split(data,
                               test_size=0.2,
Example #46
0
def main():
    args = parse_args()

    cudnn.benchmark = config.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = config.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = config.CUDNN.ENABLED
    gpus = [int(i) for i in config.GPUS.split(',')]

    logger, final_output_dir, tb_log_dir = create_logger(
        config, args.cfg, 'test')

    # initialize generator and discriminator
    G_AB = eval('models.cyclegan.get_generator')(config.DATA.IMAGE_SHAPE,
                                                 config.NETWORK.NUM_RES_BLOCKS)
    G_BA = eval('models.cyclegan.get_generator')(config.DATA.IMAGE_SHAPE,
                                                 config.NETWORK.NUM_RES_BLOCKS)
    D_A = eval('models.cyclegan.get_discriminator')(config.DATA.IMAGE_SHAPE)
    D_B = eval('models.cyclegan.get_discriminator')(config.DATA.IMAGE_SHAPE)
    #logger.info(pprint.pformat(G_AB))
    #logger.info(pprint.pformat(D_A))

    # multi-gpus

    model_dict = {}
    model_dict['G_AB'] = torch.nn.DataParallel(G_AB, device_ids=gpus).cuda()
    model_dict['G_BA'] = torch.nn.DataParallel(G_BA, device_ids=gpus).cuda()
    model_dict['D_A'] = torch.nn.DataParallel(D_A, device_ids=gpus).cuda()
    model_dict['D_B'] = torch.nn.DataParallel(D_B, device_ids=gpus).cuda()

    # loss functions
    criterion_dict = {}
    criterion_dict['GAN'] = torch.nn.MSELoss().cuda()
    criterion_dict['cycle'] = torch.nn.L1Loss().cuda()
    criterion_dict['identity'] = torch.nn.L1Loss().cuda()

    if config.TEST.MODEL_FILE:
        _, model_dict, _ = load_checkpoint(model_dict, {},
                                           final_output_dir,
                                           is_train=False)
    else:
        logger.info('[error] no model file specified')
        assert 0

    #Buffers of previously generated samples
    fake_A_buffer = ReplayBuffer()
    fake_B_buffer = ReplayBuffer()

    # Image transformations
    transforms_ = [
        #transforms.Resize(int(config.img_height * 1.12), Image.BICUBIC),
        #transforms.RandomCrop((config.img_height, config.img_width)),
        #transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ]

    # Dataset
    logger.info('=> loading testing dataset...')

    test_dataset = ImageDataset(config.DATA.TEST_DATASET_B,
                                config.DATA.TEST_DATASET,
                                transforms_=transforms_,
                                mode='test')

    # Test data loader
    test_dataloader = DataLoader(test_dataset,
                                 batch_size=config.TEST.BATCH_SIZE * len(gpus),
                                 shuffle=False,
                                 num_workers=config.NUM_WORKERS)

    test(config, model_dict, test_dataloader, criterion_dict, final_output_dir)
    logger.info('=> finished testing, saving generated images to {}'.format(
        final_output_dir + '/images'))
Example #47
0
USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/

LOGIN_URL = '/login'
LOGOUT_URL = '/logout'
LOGIN_REDIRECT_URL = '/'
STATIC_URL = '/static/'
# MEDIA_URL = 'js/'
GLOBAL_CONFIG = {}
logging_mode = 'both'
logger = create_logger(logging_mode, stream_level=logging.DEBUG)
logger_error = create_logger_error(logging_mode, stream_level=logging.DEBUG)

# GLOBAL_CONFIG['LOGGER'] = logger

config = ConfigParser.RawConfigParser()
config.read('/etc/register.cfg')

GLOBAL_CONFIG['LDAP_SERVER'] = config.get('LDAP', 'server')
GLOBAL_CONFIG['LDAP_USER'] = config.get('LDAP', 'bind_dn')
GLOBAL_CONFIG['LDAP_PASSWORD'] = config.get('LDAP', 'password')
GLOBAL_CONFIG['LDAP_BASE_OU'] = config.get('LDAP', 'user_search')
GLOBAL_CONFIG['project'] = ''
GLOBAL_CONFIG['admin'] = config.get('MAILING', 'admin')
GLOBAL_CONFIG['DEBUG_LVL'] = config.get('MAIN', 'debug_lvl')
Example #48
0
    samplecounttotal = 0
    gencount = 0
    gencounttotal = 0
    poscount = 0

    #Get default values
    source_dir = config.source_dir_oxford
    source_file_type = config.source_file_type_oxford
    destination_dir = config.destination_dir_oxford
    destination_file_type = config.destination_file_type_oxford
    sample_file = config.sample_file_oxford
    sample_file_format = config.sample_file_format_oxford
    numthreads = multiprocessing.cpu_count()

    #Pass the script name to Log
    logger = utils.create_logger("gentotsv")

    start_time = time.time()
    print "Start time: " + time.ctime()
    utils.log(logger, "Start time: " + time.ctime())

    for opt, arg in opts:

        if opt == '-h':
            help = 1
            script_usage()

        elif opt == '-t':

            global numthreads
            numthreads = int(arg)
Example #49
0
def main(args):

    ###########################################################
    ## create folders to save models and logs
    save_folder = '%s_%d' % (args.dataset, args.task_id)
    name = 'a%.2f_%s_b1_%.2f_b2_%.2f_e%.2f_p%.2f_seed%d' % (
        args.alpha, args.p_d_bar_type, args.beta_1, args.beta_2, args.lambda_e,
        args.lambda_p, args.seed)
    if args.spectral_norm:
        name += '_spectral'

    name += args.affix

    log_folder = os.path.join(args.log_root, save_folder)
    makedirs(log_folder)

    model_folder = os.path.join(args.model_root, save_folder)
    makedirs(model_folder)

    setattr(args, 'log_folder', log_folder)
    setattr(args, 'model_folder', model_folder)

    setattr(args, 'save_folder', save_folder)
    setattr(args, 'name', name)

    logger = create_logger(log_folder, args.todo)

    print_args(args, logger)

    ###########################################################
    # preparing dataset
    ###########################################################

    if args.dataset == 'mnist':
        setattr(args, '2d', False)
        labeled_loader, unlabeled_loader, p_d, p_d_bar, dev_loader, p_d_2 = \
            all_data.get_mnist_loaders(args)

        # from mnist_trainer import Trainer

    elif args.dataset == 'svhn':
        setattr(args, '2d', False)

        labeled_loader, unlabeled_loader, p_d, p_d_bar, dev_loader, p_d_2 = \
            all_data.get_svhn_loaders(args)

        # from svhn_trainer import Trainer

    elif args.dataset == 'cifar':
        setattr(args, '2d', False)
        labeled_loader, unlabeled_loader, p_d, p_d_bar, dev_loader, p_d_2 = \
            all_data.get_cifar_loaders(args)

        # from cifar_trainer import Trainer

    else:
        raise NotImplementedError

    from trainer import Trainer

    # parameters for trainer
    trainer_dict = {'args': args, 'logger': logger}

    tr_data_dict = {
        'labeled_loader': labeled_loader,
        'unlabeled_loader': unlabeled_loader,
        'p_d': p_d,
        'p_d_bar': p_d_bar,
        'dev_loader': dev_loader,
        'p_d_2': p_d_2
    }

    trainer = Trainer(trainer_dict)

    err, err_per = trainer.train(tr_data_dict)
Example #50
0
def optimize(trial, args):

    setattr(args, 'hidden_dim',
            int(trial.suggest_categorical('d_model', [128, 256, 512])))
    setattr(args, 'depth',
            int(trial.suggest_discrete_uniform('n_enc', 2, 6, 1)))
    setattr(args, 'n_layers',
            int(trial.suggest_discrete_uniform('n_lay', 1, 3, 1)))
    setattr(args, 'lr', trial.suggest_loguniform('lr', 1e-5, 1e-2))
    setattr(args, 'batch_size',
            int(trial.suggest_categorical('batch_size', [16, 32, 64, 128])))

    setattr(args, 'log_dir',
            os.path.join(args.hyperopt_dir, str(trial._trial_id)))

    torch.manual_seed(0)
    train_logger = create_logger('train', args.log_dir)

    train_logger.info('Arguments are...')
    for arg in vars(args):
        train_logger.info(f'{arg}: {getattr(args, arg)}')

    # construct loader and set device
    train_loader, val_loader = construct_loader(args)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # build model
    model_parameters = {
        'node_dim': train_loader.dataset.num_node_features,
        'edge_dim': train_loader.dataset.num_edge_features,
        'hidden_dim': args.hidden_dim,
        'depth': args.depth,
        'n_layers': args.n_layers
    }
    model = G2C(**model_parameters).to(device)

    # multi gpu training
    if torch.cuda.device_count() > 1:
        train_logger.info(
            f'Using {torch.cuda.device_count()} GPUs for training...')
        model = torch.nn.DataParallel(model)

    # get optimizer and scheduler
    optimizer, scheduler = get_optimizer_and_scheduler(
        args, model, len(train_loader.dataset))
    loss = torch.nn.MSELoss(reduction='sum')

    # record parameters
    train_logger.info(
        f'\nModel parameters are:\n{dict_to_str(model_parameters)}\n')
    save_yaml_file(os.path.join(args.log_dir, 'model_paramaters.yml'),
                   model_parameters)
    train_logger.info(f'Optimizer parameters are:\n{optimizer}\n')
    train_logger.info(f'Scheduler state dict is:')
    if scheduler:
        for key, value in scheduler.state_dict().items():
            train_logger.info(f'{key}: {value}')
        train_logger.info('')

    best_val_loss = math.inf
    best_epoch = 0

    model.to(device)
    train_logger.info("Starting training...")
    for epoch in range(1, args.n_epochs):
        train_loss = train(model, train_loader, optimizer, loss, device,
                           scheduler, logger if args.verbose else None)
        train_logger.info("Epoch {}: Training Loss {}".format(
            epoch, train_loss))

        val_loss = test(model, val_loader, loss, device, args.log_dir, epoch)
        train_logger.info("Epoch {}: Validation Loss {}".format(
            epoch, val_loss))
        if scheduler and not isinstance(scheduler, NoamLR):
            scheduler.step(val_loss)

        if val_loss <= best_val_loss:
            best_val_loss = val_loss
            best_epoch = epoch
            torch.save(model.state_dict(),
                       os.path.join(args.log_dir, f'epoch_{epoch}_state_dict'))
    train_logger.info("Best Validation Loss {} on Epoch {}".format(
        best_val_loss, best_epoch))

    train_logger.handlers = []
    return best_val_loss
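
The optimize function above is an Optuna objective; it only defines how a single trial is scored. Wiring it into a study would look roughly like the sketch below (the study direction and trial count are assumptions):

import functools
import optuna

study = optuna.create_study(direction='minimize')
study.optimize(functools.partial(optimize, args=args), n_trials=20)
print(study.best_params)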
Example #51
0
pip install django
'''
import logging
import os
import sys
import csv
import django
from django.conf import settings
from utils import Logger, create_logger
from Receiver.models import Node

''' SET UP LOGGING '''
# Setup logfile name
LOG_FILENAME = '/tmp/read_commands.log'
# Create logger
logger = create_logger(LOG_FILENAME, logging.INFO)
# Redirect stdout, stderr
sys.stdout = Logger(logger, logging.INFO)
sys.stderr = Logger(logger, logging.ERROR)
# Begin logging information
logger.info('Starting')
''' END LOGGING SETUP '''

''' SETUP DJANGO FOR DATABASE PULLS '''
# Add directory to system path
sys.path.insert(1, '/home/pi/ccasp/Microcontrollers/Microcontrollers/')
# Add DJANGO_SETTINGS_MODULE to environmental variables
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
# Setup Django
django.setup()
''' END DJANGO SETUP '''
Example #52
0
    return args


if __name__ == '__main__':
    args = parse_args()
    horizon = configuration.TASK.HORIZON
    batch_size = configuration.TRAIN.BATCH_SIZE
    num_iterations = configuration.TRAIN.NUM_ITERATIONS
    #step =  num_iterations//100

    train_scene_list = os.listdir(
        './data/datasets/pointnav/gibson/v1/train/content/')
    episodes_per_train_scene = configuration.TRAIN.EPISODES_PER_SCENE

    logger, final_output_dir, tb_log_dir = create_logger(
        configuration, args.cfg, 'train')

    habitat_config = habitat.get_config(
        config_file='tasks/pointnav_gibson.yaml')
    habitat_config.defrost()
    #habitat_config.DATASET.DATA_PATH = '/data/datasets/pointnav/gibson/v1/train/content'
    habitat_config.DATASET.SCENES_DIR = '/data/scene_datasets/gibson'
    habitat_config.SIMULATOR.AGENT_0.SENSORS = ['RGB_SENSOR']
    habitat_config.SIMULATOR.TURN_ANGLE = 45
    habitat_config.ENVIRONMENT.MAX_EPISODE_STEPS = horizon - 1
    habitat_config.freeze()

    environment = HabitatWrapper(configuration, habitat_config)
    environment.reset()
    if configuration.TRAIN.OPTIMIZER == 'adam':
        optimizer = Adam(learning_rate=configuration.TRAIN.LR)
Example #53
0
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Libravatar.  If not, see <http://www.gnu.org/licenses/>.

import json
import os
import sys

# pylint: disable=W0403
import settings
from utils import create_logger, delete_if_exists, is_hex

os.umask(022)
logger = create_logger('deletephoto')


def main(argv=None):
    if argv is None:
        argv = sys.argv

    gearman_workload = sys.stdin.read()
    params = json.loads(gearman_workload)

    file_hash = params['file_hash']
    file_format = params['format']

    # Validate inputs
    if not is_hex(file_hash):
        logger.error('file_hash is not a hexadecimal value')
Example #54
0
import requests
import pymongo
from urllib.parse import quote
import time
import datetime
import random
import json
import utils
import os

logger = utils.create_logger(__name__)
uri = os.environ.get("MONGO_URI")
client = pymongo.MongoClient(uri)
db = client["news"]


def get_new_ids(start_time):
    rs = db["news_list"].find(
        {"published_at": {
            "$gte": datetime.datetime.timestamp(start_time)
        }})
    new_ids = [r["id"] for r in rs]
    rs = db["news_data"].find({})
    old_ids = [r["uuid"] for r in rs]
    return list(set(new_ids) - set(old_ids))


def get_content(id_list, replace=False):
    url = "https://tw.news.yahoo.com/_td-news/api/resource/content;fetchNewAttribution=true;getDetailView=true;getFullLcp=false;imageResizer=null;relatedContent=%7B%22enabled%22%3Atrue%7D;site=news;uuids={}"

    querystring = {
Example #55
0
# You should have received a copy of the GNU Affero General Public License
# along with Libravatar.  If not, see <http://www.gnu.org/licenses/>.

import base64
from gearman import libgearman
import gzip
import json
import os
import sys
from xml.sax import saxutils

# pylint: disable=W0403
import settings
from utils import create_logger, is_hex

logger = create_logger('exportaccount')

SCHEMA_ROOT = 'https://www.libravatar.org/schemas/export/0.2'
SCHEMA_XSD = '%s/export.xsd' % SCHEMA_ROOT


def xml_header():
    return '''<?xml version="1.0" encoding="UTF-8"?>
<user xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
      xsi:schemaLocation="%s %s"
      xmlns="%s">\n''' % (SCHEMA_ROOT, SCHEMA_XSD, SCHEMA_ROOT)


def xml_footer():
    return '</user>\n'
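
xml_header and xml_footer are meant to bracket the exported account data. A hedged usage sketch (the body line is illustrative only):

xml = xml_header()
xml += '  <account username=%s/>\n' % saxutils.quoteattr('alice')
xml += xml_footer()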
Example #56
0
#
# You should have received a copy of the GNU Affero General Public License
# along with Libravatar.  If not, see <http://www.gnu.org/licenses/>.

import json
import os
import sys

import Image

# pylint: disable=relative-import
import settings
from utils import create_logger, is_hex

os.umask(022)
LOGGER = create_logger('resizeavatar')


def resize_image(email_hash, size):
    original_filename = settings.AVATAR_ROOT + email_hash

    output_dir = settings.AVATAR_ROOT + '/%s' % size
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    resized_filename = '%s/%s' % (output_dir, email_hash)

    # Save resized image to disk
    original_img = Image.open(original_filename)
    resized_img = original_img.resize((size, size), Image.ANTIALIAS)
    resized_img.save(resized_filename, original_img.format, quality=settings.JPEG_QUALITY)