Example #1
def __init__(self):
    super(Forecaster, self).__init__()
    self._trained = False
    self._device = None
    self._train_loss = None
    self._validation_loss = None
    self._validation_forecast = None
    self._logger = get_logger(self.__class__.__name__)
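Note: the initializer names its logger after the concrete class, so every subclass automatically logs under its own name. A minimal, self-contained illustration (the Base/Child names are hypothetical, not part of the example):

import logging

# self.__class__.__name__ resolves to the runtime class, so each
# subclass gets its own logger without any extra code.
class Base:
    def __init__(self):
        self.logger = logging.getLogger(self.__class__.__name__)

class Child(Base):
    pass

print(Child().logger.name)  # -> 'Child'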
Example #2
def main(argv):
    options = get_options(argv)
    logger = get_logger(log_name='webapp.log', verbose=options['verbose'])
    if 'query' in options:
        search_by_query(options['query'], logger)
    elif 'asin' in options:
        try:
            search_by_asin(options['asin'], logger)
        except AccessBlocked as e:
            print('Error: access blocked by Amazon: {e}.'.format(e=e))
            exit(1)
    else:
        print('Invalid options.')
        exit_usage()
Example #3
def main(argv):
  command, options = get_options(argv)
  logger = get_logger(log_name='webapp.log', verbose=options['verbose'])
  if command == 'product':
    if 'query' in options:
      print('Not supported.')
    elif 'asin' in options:
      get_product_by_asin(options['asin'], logger)
    else:
      print('Either a query or an ASIN must be given.')
  elif command == 'asin':
    if 'query' in options:
      get_asin_by_query(options['query'], logger)
    elif 'asin' in options:
      print('Not supported.')
    else:
      print('Either a query or an ASIN must be given.')
  else:
    print('Invalid option.')
    exit_usage()
Example #4
import os

import torch
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
from torch.optim.lr_scheduler import MultiStepLR, CosineAnnealingLR

# Assumed: the snippet also relies on `utils` helpers and a SummaryWriter
# (the original project may import SummaryWriter from tensorboardX instead).
import utils
from torch.utils.tensorboard import SummaryWriter

from utils.options import args
from model.cifar10.shiftresnet import *


def _make_dir(path):
    if not os.path.exists(path):
        os.makedirs(path)


ckpt = utils.checkpoint(args)
print_logger = utils.get_logger(os.path.join(args.job_dir, "logger.log"))
utils.print_params(vars(args), print_logger.info)
writer_train = SummaryWriter(args.job_dir + '/run/train')
writer_test = SummaryWriter(args.job_dir + '/run/test')


def main():
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    cudnn.benchmark = True

    start_epoch = args.start_epoch

    lr_decay_step = list(map(int, args.lr_decay_step.split(',')))

    # Data loading
    print_logger.info('=> Preparing data..')
Example #5
import os
import time
import datetime

import torch
import torch.backends.cudnn as cudnn

import utils  # project-local helpers (record_config, get_logger, ...); `args` is assumed to be parsed elsewhere

torch.manual_seed(args.seed)  # for CPU
if args.gpu:
    torch.cuda.manual_seed(args.seed)  # for GPU

if not os.path.isdir(args.job_dir):
    os.makedirs(args.job_dir)

if len(args.device_ids) == 1:
    args.device = torch.device(
        "cuda:0" if torch.cuda.is_available() else "cpu")
else:
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

utils.record_config(args)
now = datetime.datetime.now().strftime('%Y-%m-%d-%H:%M:%S')
logger = utils.get_logger(os.path.join(args.job_dir, 'logger' + now + '.log'))

# used when loading a pretrained model
if len(args.gpu) > 1:
    args.name_base = 'module.'
else:
    args.name_base = ''


def main():
    start_t = time.time()

    cudnn.benchmark = True
    cudnn.enabled = True
    logger.info("args = %s", args)
Example #6
from io import StringIO, BytesIO
from pprint import pprint
from PyPDF2 import PdfFileReader, utils
from PIL import Image
import imagehash
from tempfile import TemporaryDirectory, mkdtemp

from utils.common import get_logger
from utils.constants import SMUR, AM, P, VOR
from utils.patterns import DOI_PATTERN, ALL_CC_LICENCES, RIGHTS_RESERVED_PATTERNS, VERSION_PATTERNS
from utils.logos import PublisherLogo


# logging.config.fileConfig('logging.conf', defaults={'logfilename': 'artemis.log'})
# logger = logging.getLogger(__name__)
logger = get_logger()


# LOGOS_DB_PATH = os.path.join(os.path.realpath(__file__), "utils", "logos_db.shelve_BKUP") # for some reason, this doesn't
# work with line: with shelve.open("utils/logos_db.shelve_BKUP") as db:
LOGOS_DB_PATH = "utils/logos_db.shelve_BKUP"

NUMBER_OF_CHARACTERS_IN_ONE_PAGE = 2600

PUBLISHER_PDF_METADATA_TAGS = [
    '/CrossMarkDomains#5B1#5D',
    '/CrossMarkDomains#5B2#5D',
    '/CrossmarkDomainExclusive',
    '/CrossmarkMajorVersionDate',
    '/doi',
    '/ElsevierWebPDFSpecifications',
Example #7
File: views.py Project: soszrg/luoluo
# -*- encoding: utf-8 -*-
from __future__ import absolute_import, unicode_literals

from django.contrib.auth.models import User
from django.db import transaction
from rest_framework import permissions
from rest_framework.response import Response
from rest_framework.views import APIView

from upload.models import Picture, PicturesInGallery, Gallery, PictureTags
from utils.common import get_logger

log = get_logger(__name__)


class PictureView(APIView):
    authentication_classes = ()
    permission_classes = (permissions.AllowAny,)

    def get(self, request):
        with transaction.atomic():
            p = Picture.objects.select_for_update().get(pk=3)
            p.headline = 'p3'
            p.save(update_fields=['headline'])
        return Response('get ok!')
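(Note: Django only allows select_for_update() to be evaluated inside a transaction, which is why the row update above is wrapped in transaction.atomic(); outside of one it raises TransactionManagementError.)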
Example #8
    if cfg.finetune is not None:  # assumed guard; the snippet begins mid-function
        state_all = torch.load(cfg.finetune)['model']
        state_clip = {}  # only use backbone parameters
        for k, v in state_all.items():
            if 'model' in k:
                state_clip[k] = v
        net.load_state_dict(state_clip, strict=False)
    if cfg.resume is not None:
        dist_print('==> Resume model from ' + cfg.resume)
        resume_dict = torch.load(cfg.resume, map_location='cpu')
        net.load_state_dict(resume_dict['model'])
        if 'optimizer' in resume_dict.keys():
            optimizer.load_state_dict(resume_dict['optimizer'])
        # parse the epoch index from the checkpoint filename (assumes a name like 'ep012.pth')
        resume_epoch = int(os.path.split(cfg.resume)[1][2:5]) + 1
    else:
        resume_epoch = 0

    scheduler = get_scheduler(optimizer, cfg, len(train_loader))
    dist_print(len(train_loader))
    metric_dict = get_metric_dict(cfg)
    loss_dict = get_loss_dict(cfg)
    logger = get_logger(work_dir, cfg)
    cp_projects(work_dir)

    for epoch in range(resume_epoch, cfg.epoch):

        train(net, train_loader, loss_dict, optimizer, scheduler, logger,
              epoch, metric_dict, cfg.use_aux)

        save_model(net, optimizer, epoch, work_dir, distributed)
    logger.close()
Example #9
import os

import torch
import torch.nn as nn
from importlib import import_module

# Assumed: `args`, `utils`, and `imagenet_dali` are provided by the project.

conv_num_cfg = {
    'vgg16': 13,
    'resnet18': 8,
    'resnet34': 16,
    'resnet50': 16,
    'resnet101': 33,
    'resnet152': 50
}

food_dimension = conv_num_cfg[args.cfg]

device = torch.device(f"cuda:{args.gpus[0]}") if torch.cuda.is_available() else 'cpu'
checkpoint = utils.checkpoint(args)
logger = utils.get_logger(os.path.join(args.job_dir, 'logger.log'))
loss_func = nn.CrossEntropyLoss()

# Data
print('==> Preparing data..')
def get_data_set(split='train'):
    if split == 'train':
        return imagenet_dali.get_imagenet_iter_dali('train', args.data_path, args.train_batch_size,
                                                    num_threads=4, crop=224, device_id=args.gpus[0], num_gpus=1)
    else:
        return imagenet_dali.get_imagenet_iter_dali('val', args.data_path, args.eval_batch_size,
                                                    num_threads=4, crop=224, device_id=args.gpus[0], num_gpus=1)
trainLoader = get_data_set('train')
testLoader = get_data_set('test')

Example #10
import argparse
from typing import Dict, List

import torch.nn as nn
from torch.utils.data import DataLoader

# Assumed project-local helpers: get_logger, load_dataset, run_experiment,
# plot_losses, plot_forecasts, save_results, and the TrainOutput /
# TrainLossInfo / ForecastsInfo containers.


def main():
    parser = argparse.ArgumentParser(description='Run Time Series Forecasting')

    parser.add_argument('--num_experiments',
                        type=int,
                        default=10,
                        help='How many experiments to run')

    parser.add_argument('--seq_length',
                        type=int,
                        default=168,
                        help='The Time Series Sequence Length')

    parser.add_argument('--horizon',
                        type=int,
                        default=24,
                        help='How Many Data Points In The Future To Predict')

    parser.add_argument('--batch_size',
                        type=int,
                        default=16,
                        help='The Training Batch Size')

    parser.add_argument('--epochs',
                        type=int,
                        default=50,
                        help='The Number Of Epochs To Run')

    parser.add_argument('--dataset',
                        type=str,
                        choices=['stocks', 'traffic'],
                        default='traffic',
                        help='Which Dataset To Load (Default=traffic)')

    args = parser.parse_args()

    ######################################### Static #########################################
    logger = get_logger('Main')
    num_experiments: int = args.num_experiments
    seq_length: int = args.seq_length
    batch_size: int = args.batch_size
    num_epochs: int = args.epochs
    horizon: int = args.horizon
    dataset: str = args.dataset

    ######################################### Organizing Data #########################################

    # loading data
    train_dataset, validation_dataset, test_dataset, scaler = load_dataset(
        dataset=dataset, seq_length=seq_length, horizon=horizon)

    n_samples, n_features = train_dataset.data.shape

    train_data_loader = DataLoader(dataset=train_dataset,
                                   batch_size=batch_size,
                                   shuffle=True)
    validation_data_loader = DataLoader(dataset=validation_dataset,
                                        batch_size=batch_size,
                                        shuffle=True)
    test_data_loader = DataLoader(dataset=test_dataset,
                                  batch_size=batch_size,
                                  shuffle=False)

    look_ahead_context = test_dataset[len(validation_dataset) - 1]

    loss_function = nn.MSELoss(reduction='sum')

    ######################################### Running Experiments #########################################

    results: Dict[str, List[TrainOutput]] = {}

    for i in range(num_experiments):
        logger.info(f'Running Experiment [{i + 1}/{num_experiments}]')
        experiment_result = run_experiment(
            n_features=n_features,
            seq_length=seq_length,
            horizon=horizon,
            train_data_loader=train_data_loader,
            validation_data_loader=validation_data_loader,
            test_data_loader=test_data_loader,
            loss_function=loss_function,
            num_epochs=num_epochs,
            look_ahead_context=look_ahead_context)

        for key, value in experiment_result.items():
            if key not in results:
                results[key] = []
            results[key].append(value)

    # for each model name, keep the run with the lowest average test loss
    best_models = {
        key: min(outputs, key=lambda output: output.avg_test_loss)
        for key, outputs in results.items()
    }

    ######################################### Results #########################################

    losses_info = [
        TrainLossInfo(model_name=model_name,
                      train_loss=model.train_loss,
                      validation_loss=model.validation_loss)
        for model_name, (model, avg_test_loss, look_ahead) in best_models.items()
    ]

    plot_losses(losses_info=losses_info, output_dir=dataset)

    forecast_info = [
        ForecastsInfo(model_name=model_name, look_ahead_forecasts=look_ahead)
        for model_name, (model, avg_test_loss,
                         look_ahead) in best_models.items()
    ]

    plot_forecasts(input_seq=look_ahead_context[0],
                   test=look_ahead_context[1],
                   forecasts=forecast_info,
                   output_dir=dataset)

    save_results(results=results, output_directory=dataset)
Example #11
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import sys
import os

ROOT_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# print(ROOT_PATH)  # /Users/huzhi/work/code/test_python/test_logging/test_3

sys.path.insert(0, ROOT_PATH)

from utils.common import get_logger

logger1 = get_logger('view/index')
print(logger1)
print(id(logger1))
print(logger1.handlers)
logger1.debug('install logger1')

logger2 = get_logger('view/index')
print(logger2)
print(id(logger2))
print(logger2.handlers)
logger2.debug('install logger2')
"""
% python install.py
<logging.Logger object at 0x1083049d0>
4432349648
[<logging.handlers.TimedRotatingFileHandler object at 0x108304a50>]
<logging.Logger object at 0x1083049d0>
4432349648
[<logging.handlers.TimedRotatingFileHandler object at 0x108304a50>]
"""
Example #12
# coding=utf-8

import traceback, ast
from requests.sessions import Session
try:
    from urlparse import urljoin
except ImportError:
    from urllib.parse import urljoin
from utils import common
from utils.read_config import config

ws = common.get_sheet("../data/TestCase.xlsx", "TestCase")
logger = common.get_logger()
base_url = config.url
session = Session()


# Test the user-creation API endpoint
def test_create_user():
    login_data = common.get_row_data(ws, 1)
    url_path = login_data[3]
    url = urljoin(base_url, url_path)
    data = login_data[6]
    expected_resp = login_data[8]
    expected_resp = ast.literal_eval(expected_resp)
    resp = session.post(url, data).json()
    try:
        assert resp == dict(expected_resp)
Example #13
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from utils.common import get_logger

logger1 = get_logger('install1')
print(logger1)
print(id(logger1))
print(logger1.handlers)
logger1.debug('install logger1')

logger2 = get_logger('install1')
print(logger2)
print(id(logger2))
print(logger2.handlers)
logger2.debug('install logger2')

logger3 = get_logger('install2')
print(logger3)
print(id(logger3))
print(logger3.handlers)
logger3.debug('install logger3')

"""
% python install.py
<logging.Logger object at 0x10fbf89d0>
4559178192
[<logging.handlers.TimedRotatingFileHandler object at 0x10fbf8a50>]
<logging.Logger object at 0x10fbf89d0>
4559178192
[<logging.handlers.TimedRotatingFileHandler object at 0x10fbf8a50>]
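Examples #11 and #13 show that repeated get_logger calls with the same name print the same Logger object (identical id()) carrying a single TimedRotatingFileHandler: the helper evidently relies on logging.getLogger's per-name cache and guards against attaching duplicate handlers. A minimal sketch consistent with that behavior (the log file name, format string, and rotation settings are assumptions; the real utils.common.get_logger may differ):

import logging
from logging.handlers import TimedRotatingFileHandler

def get_logger(name, log_file='app.log'):
    # logging.getLogger caches loggers by name, so calling this twice with
    # the same name returns the same object (hence the identical id()s above).
    logger = logging.getLogger(name)
    if not logger.handlers:
        # Attach the handler only on first use; otherwise every extra
        # get_logger call would add a handler and duplicate each record.
        handler = TimedRotatingFileHandler(log_file, when='midnight', backupCount=7)
        handler.setFormatter(logging.Formatter(
            '%(asctime)s %(name)s %(levelname)s %(message)s'))
        logger.addHandler(handler)
        logger.setLevel(logging.DEBUG)
    return logger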