def test_cross_dataset(config_file, test_dataset, **kwargs):
    cfg.merge_from_file(config_file)
    if kwargs:
        opts = []
        for k, v in kwargs.items():
            opts.append(k)
            opts.append(v)
        cfg.merge_from_list(opts)
    cfg.freeze()

    PersonReID_Dataset_Downloader('./datasets', cfg.DATASETS.NAMES)
    _, _, _, num_classes = data_loader(cfg, cfg.DATASETS.NAMES)

    PersonReID_Dataset_Downloader('./datasets', test_dataset)
    _, val_loader, num_query, _ = data_loader(cfg, test_dataset)

    re_ranking = cfg.RE_RANKING
    
    if not re_ranking:
        logger = make_logger("Reid_Baseline", cfg.OUTPUT_DIR,
                             cfg.DATASETS.NAMES+'->'+test_dataset)
        logger.info("Test Results:")
    else:
        logger = make_logger("Reid_Baseline", cfg.OUTPUT_DIR,
                             cfg.DATASETS.NAMES+'->'+test_dataset+'_re-ranking')
        logger.info("Re-Ranking Test Results:") 
        
    device = torch.device(cfg.DEVICE)
    
    model = getattr(models, cfg.MODEL.NAME)(num_classes)
    model.load(cfg.OUTPUT_DIR, cfg.TEST.LOAD_EPOCH)
    model = model.eval()
    
    all_feats = []
    all_pids = []
    all_camids = []
    
    since = time.time()
    for data in tqdm(val_loader, desc='Feature Extraction', leave=False):
        model.eval()
        with torch.no_grad():
            images, pids, camids = data
            if device:
                model.to(device)
                images = images.to(device)
            
            feats = model(images)

        all_feats.append(feats)
        all_pids.extend(np.asarray(pids))
        all_camids.extend(np.asarray(camids))

    cmc, mAP = evaluation(all_feats, all_pids, all_camids, num_query, re_ranking)

    logger.info("mAP: {:.1%}".format(mAP))
    for r in [1, 5, 10]:
        logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(r, cmc[r - 1]))
       
    test_time = time.time() - since
    logger.info('Testing complete in {:.0f}m {:.0f}s'.format(test_time // 60, test_time % 60))
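
A minimal usage sketch (the YAML path and dataset name below are placeholders, not from the original code). Keyword arguments are flattened into a [key, value, ...] list for cfg.merge_from_list, so each key must be an existing config node:

# Hypothetical invocation: test a model trained on cfg.DATASETS.NAMES against DukeMTMC.
test_cross_dataset('configs/softmax_triplet.yaml', 'DukeMTMC', RE_RANKING=True)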
Example #2
    def __init__(self):
        self.DRIVER_PATH = './driver/chromedriver'
        self.INSTA_ID = '*****@*****.**'
        self.INSTA_PASSWD = 'gh767600'
        self.image_dir = './images'

        self.db = DatabaseHelper()
        self.logger = make_logger('instagram')
        self.load_driver()
Example #3
 def __init__(self):
     self.logger = logger.make_logger('broker.log')
     self.ctx = zmq.Context.instance()
     self.mailbox = make_socket(self.ctx)
     self.devs = set()
     self.poller = zmq.Poller()
     self.top = tk.Tk()
     self.setup_ui()
     self.mail_table = {}
     self.ui_running = True
Example #4
File: device.py Project: kgord831/labzmq
 def __init__(self, name, **kwargs):
     self.name = name
     self.params = kwargs
     self.running = False
     self.logger = logger.make_logger(name + '.log')
     self.ctx = zmq.Context.instance()
     self.mailbox = make_socket(self.ctx, name)
     self.poller = zmq.Poller()
     self.cmd_queue = CommandQueue()
     self.state = 'closed'
Example #5
File: app.py Project: vmax/dubki
def feedback():
    """
         A function handling user feedback
    """
    if request.method == 'GET':
        return redirect('/about')
    elif request.method == 'POST':
        log = make_logger('feedback.log')
        text = request.form['feedback_text']
        log(text)
        return redirect('/')
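
For illustration, a client-side call this handler would accept, assuming the view is registered at /feedback for GET and POST (host and port are placeholders):

import requests

# GET just redirects to /about; POST logs the submitted text and redirects home.
requests.post('http://localhost:5000/feedback',
              data={'feedback_text': 'Great service, thanks!'})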
Example #6
def train(cfg):
    # output
    output_dir = cfg.OUTPUT_DIR
    if os.path.exists(output_dir):
        raise KeyError("Existing path: ", output_dir)
    else:
        os.makedirs(output_dir)

    with open(os.path.join(output_dir, 'config.yaml'), 'w') as f_out:
        print(cfg, file=f_out)

    # logger
    logger = make_logger("project", output_dir, 'log')

    # device
    num_gpus = 0
    if cfg.DEVICE == 'cuda':
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.DEVICE_ID
        num_gpus = len(cfg.DEVICE_ID.split(','))
        logger.info("Using {} GPUs.\n".format(num_gpus))
    cudnn.benchmark = True
    device = torch.device(cfg.DEVICE)

    # data
    train_loader, query_loader, gallery_loader, num_classes = make_loader(cfg)

    # model
    model = make_model(cfg, num_classes=num_classes)
    if num_gpus > 1:
        model = nn.DataParallel(model)

    # solver
    criterion = make_loss(cfg, num_classes)
    optimizer = make_optimizer(cfg, model)
    scheduler = make_scheduler(cfg, optimizer)

    # do_train
    trainer = Trainer(model=model,
                      optimizer=optimizer,
                      criterion=criterion,
                      logger=logger,
                      scheduler=scheduler,
                      device=device)

    trainer.run(start_epoch=0,
                total_epoch=cfg.SOLVER.MAX_EPOCHS,
                train_loader=train_loader,
                query_loader=query_loader,
                gallery_loader=gallery_loader,
                print_freq=cfg.SOLVER.PRINT_FREQ,
                eval_period=cfg.SOLVER.EVAL_PERIOD,
                out_dir=output_dir)

    print('Done.')
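
A sketch of driving this pipeline with a yacs-style config; the get_cfg_defaults helper, the YAML path, and the override keys are assumptions, not part of the project above:

from config import get_cfg_defaults  # hypothetical location of the default config

cfg = get_cfg_defaults()
cfg.merge_from_file('configs/baseline.yaml')
cfg.merge_from_list(['DEVICE', 'cuda', 'DEVICE_ID', '0,1'])
cfg.freeze()
train(cfg)  # raises if cfg.OUTPUT_DIR already exists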
Example #7
 def __init__(self):
     conn_info = {
         'host': '192.168.219.110',
         'dbname': 'insta_matzip',
         'user': '******',
         'password': '******',
         'port': '5432'
     }
     self.conn = psycopg2.connect(**conn_info)
     self.conn.autocommit = True
     self.cur = self.conn.cursor()
     self.logger = make_logger('db')
Example #8
def main(newspapers, rewrite):

    #  reads from urls.data, writes to database
    #  check if html exists

    logger = make_logger("logger.log")
    from collect_urls import main as collect_urls

    #  get all urls from urls.data
    urls = collect_urls(num=-1,
                        newspapers=newspapers,
                        source="urls.data",
                        parse=False)

    for url in urls:
        parse_url(url, rewrite=rewrite, logger=logger)
Example #9
def main(num, newspapers, source, parse):
    logger = make_logger("logger.log")
    logger.info(f"collecting {num} from {newspapers} from {source}")

    home = TextFiles()

    newspapers = get_newspapers_from_registry(newspapers)
    print(newspapers)

    collection = []
    for paper in newspapers:
        if source == "google":
            urls = collect_from_google(num, paper, logger)
            if logger:
                logger.info(f"saving {len(urls)} to file")
            home.write(urls, "urls.data", "a")

        elif source == "urls.data":
            urls = home.get("urls.data")
            urls = urls.split("\n")
            urls.remove("")
            urls = [
                u
                for u in urls
                if paper["newspaper_url"] in u
                if paper["checker"](u, logger)
            ][:num]

            if logger:
                logger.info(f"loaded {len(urls)} urls from {source}")

        collection.extend(urls)

    collection = set(collection)
    logger.info(f"collected {len(collection)} urls")

    if parse:
        for url in collection:
            parse_url(url, rewrite=True, logger=logger)

    return collection
Example #10
File: app.py Project: vmax/dubki
def route_mobile():
    """
        A function providing the route for mobile devices

        POST args:
            _from (str): place of departure
            _to (str): place of arrival
            when(str): 'now' | 'today' | 'tomorrow'
            when_param(str): '%H:%M' datetime for reverse routing
            device_id(str): mobile device id for logging

        Returns:
            _route (str): JSON-formatted string with route
    """
    log = make_logger('route_mobile.log')
    _from = request.form['_from']
    _to = request.form['_to']
    when = request.form['when']
    when_param = request.form['when_param']
    device_id = request.form['device_id']
    log("{device_id} {_from} {_to}".format
        (
            device_id=device_id,
            _from=_from,
            _to=_to))
    if when == 'now':
        _route = calculate_route(_from, _to, datetime.now() + timedelta(minutes=10), "MOBILE")
    else:
        _date = datetime.now()
        _time = [int(x) for x in when_param.split(':')]
        _date = _date.replace(hour=_time[0], minute=_time[1])
        if when == 'tomorrow':
            _date += timedelta(days=1)
        _route = calculate_route_reverse(_from, _to, _date)
    js_route = json.dumps(_route, cls=DateTimeAwareJSONEncoder)
    response = make_response(js_route)
    response.headers['Content-Type'] = 'application/json; charset=utf-8'
    return response
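
For illustration, a client request matching the POST args documented above; the host, port, and route path are assumptions, and the place names are illustrative values:

import requests

payload = {
    '_from': 'dubki',           # place of departure
    '_to': 'myasnitskaya',      # place of arrival
    'when': 'today',            # 'now' | 'today' | 'tomorrow'
    'when_param': '08:30',      # '%H:%M', used when when != 'now'
    'device_id': 'test-device-001',
}
resp = requests.post('http://localhost:5000/route_mobile', data=payload)
route = resp.json()             # JSON-formatted route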
Example #11
def train(config_file1, config_file2, **kwargs):
    # 1. config
    cfg.merge_from_file(config_file1)
    if kwargs:
        opts = []
        for k, v in kwargs.items():
            opts.append(k)
            opts.append(v)
        cfg.merge_from_list(opts)
    #cfg.freeze()
    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)

    logger = make_logger("Reid_Baseline", output_dir, 'log')
    #logger.info("Using {} GPUS".format(1))
    logger.info("Loaded configuration file {}".format(config_file1))
    logger.info("Running with config:\n{}".format(cfg))

    checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD
    eval_period = cfg.SOLVER.EVAL_PERIOD
    device = torch.device(cfg.DEVICE)
    epochs = cfg.SOLVER.MAX_EPOCHS

    # 2. datasets
    # Load the original dataset
    #dataset_reference = init_dataset(cfg, cfg.DATASETS.NAMES )
    dataset_reference = init_dataset(cfg, cfg.DATASETS.NAMES +
                                     '_origin')  #'Market1501_origin'
    train_set_reference = ImageDataset(dataset_reference.train,
                                       train_transforms)
    train_loader_reference = DataLoader(train_set_reference,
                                        batch_size=128,
                                        shuffle=False,
                                        num_workers=cfg.DATALOADER.NUM_WORKERS,
                                        collate_fn=train_collate_fn)
    # not fed into the network, so no transform is needed

    # Load the one-shot dataset
    train_loader, val_loader, num_query, num_classes = data_loader(
        cfg, cfg.DATASETS.NAMES)

    # 3. load the model and optimizer
    model = getattr(models, cfg.MODEL.NAME)(num_classes)
    optimizer = make_optimizer(cfg, model)
    scheduler = make_scheduler(cfg, optimizer)
    loss_fn = make_loss(cfg)
    logger.info("Start training")
    since = time.time()
    if torch.cuda.device_count() > 1:
        print("Use", torch.cuda.device_count(), 'gpus')
    elif torch.cuda.device_count() == 1:
        print("Use", torch.cuda.device_count(), 'gpu')
    model = nn.DataParallel(model)
    top = 0  # the choice of the nearest sample
    top_update = 0  # the first iteration trains 80 steps; the following ones train 40
    train_time = 0  # how many times the GAN has been trained so far
    bound = 1  # how many times to train the GAN; change to multiple rounds later if needed
    lock = False
    train_compen = 0
    # 4. Train and test
    for epoch in range(epochs):
        running_loss = 0.0
        running_acc = 0
        count = 1
        # get nearest samples and reset the model
        if top_update < 80:
            train_step = 80
            # whether the first pass over the GAN-regenerated images really needs 80 steps; check whether the next round receives fewer input images
        else:
            train_step = 40
        #if top_update % train_step == 0:
        if top_update % train_step == 0 and train_compen == 0:
            print("top: ", top)
            # the author's original experiments took top up to 41; a compromise is used here (whether to compromise is itself worth testing)
            #if 1==1:
            if top >= 8 and train_time < bound:
                train_compen = (top - 1) * 40 + 80
                #build_image(A,train_loader_reference,train_loader)
                train_time += 1
                # GAN training mode
                mode = 'train'
                retrain(mode)
                # let the GAN generate images into the original dataset
                produce()
                cfg.merge_from_file(config_file2)
                output_dir = cfg.OUTPUT_DIR
                if output_dir and not os.path.exists(output_dir):
                    os.makedirs(output_dir)
                logger = make_logger("Reid_Baseline", output_dir, 'log')
                logger.info(
                    "Loaded configuration file {}".format(config_file2))
                logger.info("Running with config:\n{}".format(cfg))
                dataset_reference = init_dataset(
                    cfg, cfg.DATASETS.NAMES + '_origin')  #'Market1501_origin'
                train_set_reference = ImageDataset(dataset_reference.train,
                                                   train_transforms)
                train_loader_reference = DataLoader(
                    train_set_reference,
                    batch_size=128,
                    shuffle=False,
                    num_workers=cfg.DATALOADER.NUM_WORKERS,
                    collate_fn=train_collate_fn)
                dataset_ref = init_dataset(cfg, cfg.DATASETS.NAMES +
                                           '_ref')  #'Market1501_origin'
                train_set_ref = ImageDataset(dataset_ref.train,
                                             train_transforms)
                train_loader_ref = DataLoader(
                    train_set_ref,
                    batch_size=128,
                    shuffle=False,
                    num_workers=cfg.DATALOADER.NUM_WORKERS,
                    collate_fn=train_collate_fn)
                lock = True
            if lock == True:
                A, path_labeled = PSP2(model, train_loader_reference,
                                       train_loader, train_loader_ref, top,
                                       logger, cfg)
                lock = False
            else:
                A, path_labeled = PSP(model, train_loader_reference,
                                      train_loader, top, logger, cfg)

            #vis = len(train_loader_reference.dataset)
            #A= torch.ones(vis, len(train_loader_reference.dataset))
            #build_image(A,train_loader_reference,train_loader)
            top += cfg.DATALOADER.NUM_JUMP
            model = getattr(models, cfg.MODEL.NAME)(num_classes)
            model = nn.DataParallel(model)
            optimizer = make_optimizer(cfg, model)
            scheduler = make_scheduler(cfg, optimizer)
            A_store = A.clone()
        top_update += 1

        for data in tqdm(train_loader, desc='Iteration', leave=False):
            model.train()
            images, labels_batch, img_path = data
            index, index_labeled = find_index_by_path(img_path,
                                                      dataset_reference.train,
                                                      path_labeled)
            images_relevant, GCN_index, choose_from_nodes, labels = load_relevant(
                cfg, dataset_reference.train, index, A_store, labels_batch,
                index_labeled)
            # if device:
            model.to(device)
            images = images_relevant.to(device)

            scores, feat = model(images)
            del images
            loss = loss_fn(scores, feat, labels.to(device), choose_from_nodes)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            count = count + 1
            running_loss += loss.item()
            running_acc += (scores[choose_from_nodes].max(1)[1].cpu() ==
                            labels_batch).float().mean().item()

        scheduler.step()

        # for model save if you need
        # if (epoch+1) % checkpoint_period == 0:
        #     model.cpu()
        #     model.save(output_dir,epoch+1)

        # Validation
        if (epoch + 1) % eval_period == 0:
            all_feats = []
            all_pids = []
            all_camids = []
            for data in tqdm(val_loader,
                             desc='Feature Extraction',
                             leave=False):
                model.eval()
                with torch.no_grad():
                    images, pids, camids = data

                    model.to(device)
                    images = images.to(device)

                    feats = model(images)
                    del images
                all_feats.append(feats.cpu())
                all_pids.extend(np.asarray(pids))
                all_camids.extend(np.asarray(camids))

            cmc, mAP = evaluation(all_feats, all_pids, all_camids, num_query)
            logger.info("Validation Results - Epoch: {}".format(epoch + 1))
            logger.info("mAP: {:.1%}".format(mAP))
            for r in [1, 5, 10, 20]:
                logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(
                    r, cmc[r - 1]))
        if train_compen > 0:
            train_compen -= 1

    time_elapsed = time.time() - since
    logger.info('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    logger.info('-' * 10)
Example #12
Line 139:20140117_020001:(14:E:\Logs\1234\) skip [20140117]E:\Logs\1234\StartupException_20140117_003531_455.log

BAD
Line 478:20140117_020005:(24:E:\var\Logs\1234\ExceptionLogBackup\) Starting

GOOD
Line 3302:20140117_030522:(20:E:\Logs\Wedge\Auto\) Failed deleting file
    [20131227_Task20_Auto.3]E:\Logs\Wedge\Auto\new_path_summary.log.12-27-2013.log.System.IO.IOException:
     The process cannot access the file 'E:\Logs\Wedge\Auto\new_path_summary.log.12-27-2013.log' because it is being used by another process.

Line 49:System.Web.Services.Protocols.SoapException: Server was unable to process request. ---> Object reference not set to an instance of an object.
matches found:1

"""

log = logger.make_logger('main', 'archiverUpdate.log', '.', 10).log


def runVmBFA():
    log(10, 'start')
    xml = subprocess.check_output("Topology VmBox %")
    log(10, 'xml returned')
    status, vmb_obj = VmBoxList.obj_wrapper(xml)
    log(10, 'vmb_obj returned')
    pool = multiprocessing.Pool(10)
    pool.map(FindArchiver(), vmb_obj.VMBox)

class FindArchiver(object):

    fa_re_obj = re.compile(r'filearchive.exe', re.IGNORECASE)
Example #13
try:
    import logger
    logger = logger.make_logger('pserver-test', debug=False, colored=False)
except ImportError:
    import logging
    logging.basicConfig(level = logging.DEBUG)
    logger = logging#.getLogger()


Example #14
import subprocess
import os
import sys
from datetime import datetime
import time
import yaml
from logger import make_logger

LOGGER = make_logger(sys.stderr, "gpu_protector")


def main():

    kill_temperature = int(sys.argv[1])

    with open("miner_conf.yaml", 'r') as f:
        miner_name_list = yaml.load(f, Loader=yaml.FullLoader)
    LOGGER.info("Miner Program List: {}".format(miner_name_list))
    print("Miner Program List: {}".format(miner_name_list))

    process = subprocess.Popen(
        "nvidia-smi --query-gpu=temperature.gpu --format=csv,noheader",
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        shell=True)
    output = process.communicate()[0]

    for i, line in enumerate(output.splitlines(), 0):
        gputemp = int(line.decode())
        LOGGER.info("GPU #{} Temperature = {}".format(i, gputemp))
        print("GPU #{} Temperature = {}".format(i, gputemp))
Example #15
File: route.py Project: vmax/dubki
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
    A module providing functionality for calculating routes
"""

from datetime import datetime
from datetime import timedelta
from route_bus import get_nearest_bus
from route_train import get_nearest_train
from route_subway import get_nearest_subway
from route_onfoot import get_nearest_onfoot
from logger import make_logger

ROUTE_LOG = make_logger('routing.log')
JSON_LOG = make_logger('json.log')
REVERSE_ROUTE_LOG = make_logger('reverse_routing.log')

#: list of dormitories
DORMS = {
    'dubki': 'Дубки',
    'odintsovo': 'Одинцово'
}

#: list of education campuses
EDUS = {
    'aeroport': 'Кочновский проезд (метро Аэропорт)',
    'strogino': 'Строгино',
    'myasnitskaya': 'Мясницкая (метро Лубянка)',
    'vavilova': 'Вавилова (метро Ленинский проспект)',
Example #16
import urllib3
import requests
import http
from functools import wraps
from logger import make_logger
import os


logger = make_logger(__name__, 'dataGetter_log')


def connection_exception(f):
    @wraps(f)
    def call(*args, **kwargs):
        result = None
        try:
            result = f(*args, **kwargs)
        except urllib3.response.ProtocolError as e:
            logger.error(f'[urllib3] {f.__name__}', e)
        except http.client.IncompleteRead as e:
            logger.error(f'[http] {f.__name__}', e)
        except requests.models.ChunkedEncodingError as e:
            logger.error(f'[requests] {f.__name__}', e)
        except BaseException as e:
            logger.error(
                'some Exception happend when send and receiving data. %s ', e)
        finally:
            # if os.environ.get('DEBUG_DG_API'):
            #     print("==========>")
            #     print(f)
            #     print("==========>")
            pass
        return result
    return call
Example #17
"""
This is only used in db_init and db_init now is largely
deprecated.

This garbage really shows what will happen if you try to do
concurrency without some study upfront.
"""
from threading import Thread, Lock
from queue import Queue
from typing import TypeVar, Iterable, Callable, List, cast
from itertools import islice
from .generator_chunks import chunks

from logger import make_logger

logger = make_logger('concurrent_fetch', 'concurrent_fetch_log')
logger.propagate = False

T = TypeVar("T")
LazyBox = Iterable  # iter with only one element. for lazy eval
JobBox = LazyBox[T]


def thread_fetcher(jobs: Iterable[Callable[[], T]],
                   max_thread: int,
                   fetcher: Callable[[Queue, Callable[[], T], Lock], None],
                   consumer: Callable[[Queue], None] = lambda q: None,
                   before_consume_hook: Callable = lambda: None,
                   after_consume_hook: Callable = lambda: None) -> None:
    """
    Threaded worker for data fetching
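
A minimal sketch of invoking thread_fetcher based only on the signature above, assuming each fetcher call receives (queue, job, lock), runs the zero-argument job, and enqueues its result:

from queue import Queue
from threading import Lock

def square_fetcher(q: Queue, job, lock: Lock) -> None:
    value = job()      # evaluate the lazy job
    with lock:
        q.put(value)   # hand the result to the consumer side

jobs = [lambda i=i: i * i for i in range(20)]
thread_fetcher(jobs, max_thread=4, fetcher=square_fetcher,
               consumer=lambda q: print(q.qsize(), 'results queued'))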
Example #18
# Command: python miner_watchdog.py kai_test_miner 15 ETH multi debug
# Eth speed: 46.754 MH/s

OK = '\033[36m'
FAIL = '\033[41m'
LOG = '\033[32m'
WARNING = '\033[33m'
ENDC = '\033[0m'
READ_FLAG = [True]

current_pid = None

miner_stdout_buffer = []

LOGGER = make_logger(sys.stderr, "coin-watchdog")


def get_most_profitable_coin(coin_pos, miner_dict, default_coin):
    try:
        scope = ['https://spreadsheets.google.com/feeds']
        creds = ServiceAccountCredentials.from_json_keyfile_name(
            'key3.json', scope)
        gc = gspread.authorize(creds)
        wks = gc.open_by_url(
            "https://docs.google.com/spreadsheets/d/12G_XdpgLKY_nb3zYI1BWfncjMJBcqVROkHoJcXO-JcE"
        ).worksheet("Coin Switch")
        coin = wks.acell(coin_pos).value
    except Exception as e:
        print(
            FAIL,
Example #19
def train(config_file, resume=False, iteration=10, STEP=4, **kwargs):
    """
    Parameter
    ---------
    resume : bool
        If true, continue the training and append logs to the previous log.
    iteration : int
        number of loops to test Random Datasets.
    STEP : int
        Number of steps to train the discriminator per batch
    """

    cfg.merge_from_file(config_file)
    if kwargs:
        opts = []
        for k, v in kwargs.items():
            opts.append(k)
            opts.append(v)
        cfg.merge_from_list(opts)
    cfg.freeze()

    # [PersonReID_Dataset_Downloader('./datasets', name) for name in cfg.DATASETS.NAMES]

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)

    logger = make_logger("Reid_Baseline", output_dir, 'log', resume)
    if not resume:
        logger.info("Using {} GPUS".format(1))
        logger.info("Loaded configuration file {}".format(config_file))
        logger.info("Running with config:\n{}".format(cfg))

    checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD
    eval_period = cfg.SOLVER.EVAL_PERIOD
    output_dir = cfg.OUTPUT_DIR
    device = torch.device(cfg.DEVICE)
    epochs = cfg.SOLVER.MAX_EPOCHS
    sources = cfg.DATASETS.SOURCE
    target = cfg.DATASETS.TARGET
    pooling = cfg.MODEL.POOL
    last_stride = cfg.MODEL.LAST_STRIDE

    # tf_board_path = os.path.join(output_dir, 'tf_runs')
    # if os.path.exists(tf_board_path):
    #     shutil.rmtree(tf_board_path)
    # writer = SummaryWriter(tf_board_path)

    gan_d_param = cfg.MODEL.D_PARAM
    gan_g_param = cfg.MODEL.G_PARAM
    class_param = cfg.MODEL.CLASS_PARAM
    """Set up"""
    train_loader, _, _, num_classes = data_loader(cfg,
                                                  cfg.DATASETS.SOURCE,
                                                  merge=cfg.DATASETS.MERGE)

    num_classes_train = [
        data_loader(cfg, [source], merge=False)[3]
        for source in cfg.DATASETS.SOURCE
    ]

    # based on input datasets
    bias = (max(num_classes_train)) / np.array(num_classes_train)
    bias = bias / bias.sum() * 5

    discriminator_loss = LabelSmoothingLoss(len(sources),
                                            weights=bias,
                                            smoothing=0.1)
    minus_generator_loss = LabelSmoothingLoss(len(sources),
                                              weights=bias,
                                              smoothing=0.)
    classification_loss = LabelSmoothingLoss(num_classes, smoothing=0.1)
    from loss.triplet_loss import TripletLoss
    triplet = TripletLoss(cfg.SOLVER.MARGIN)
    triplet_loss = lambda feat, labels: triplet(feat, labels)[0]

    module = getattr(generalizers, cfg.MODEL.NAME)
    D = getattr(module, 'Generalizer_D')(len(sources))
    G = getattr(module, 'Generalizer_G')(num_classes, last_stride, pooling)
    if resume:
        checkpoints = get_last_stats(output_dir)
        D.load_state_dict(torch.load(checkpoints[str(type(D))]))
        G.load_state_dict(torch.load(checkpoints[str(type(G))]))
        if device:  # must be done before the optimizer generation
            D.to(device)
            G.to(device)

    discriminator_optimizer = Adam(D.parameters(),
                                   lr=cfg.SOLVER.BASE_LR,
                                   weight_decay=cfg.SOLVER.WEIGHT_DECAY)
    generator_optimizer = Adam(G.parameters(),
                               lr=cfg.SOLVER.BASE_LR,
                               weight_decay=cfg.SOLVER.WEIGHT_DECAY)
    discriminator_scheduler = make_scheduler(cfg, discriminator_optimizer)
    generator_scheduler = make_scheduler(cfg, generator_optimizer)
    base_epo = 0
    if resume:
        discriminator_optimizer.load_state_dict(
            torch.load(checkpoints['D_opt']))
        generator_optimizer.load_state_dict(torch.load(checkpoints['G_opt']))
        discriminator_scheduler.load_state_dict(
            torch.load(checkpoints['D_sch']))
        generator_scheduler.load_state_dict(torch.load(checkpoints['G_sch']))
        base_epo = checkpoints['epo']

    # Modify the labels:
    # RULE:
    # according to the order of names in cfg.DATASETS.NAMES, add a base number

    since = time.time()
    if not resume:
        logger.info("Start training")

    batch_count = 0
    STEP = 4
    Best_R1s = [0, 0, 0, 0]
    Benchmark = [69.6, 43.7, 59.4, 78.2]

    for epoch in range(epochs):
        # anneal = sigmoid(annealing_base + annealing_factor*(epoch+base_epo))
        anneal = max(1 - (1 / 80 * epoch), 0)
        count = 0
        running_g_loss = 0.
        running_source_loss = 0.
        running_class_acc = 0.
        running_acc_source = 0.
        running_class_loss = 0.

        reset()

        for data in tqdm(train_loader, desc='Iteration', leave=False):
            # NOTE: zip ensured the shortest dataset dominates the iteration
            D.train()
            G.train()
            images, labels, domains = data
            if device:
                D.to(device)
                G.to(device)
                images, labels, domains = images.to(device), labels.to(
                    device), domains.to(device)
            """Start Training D"""

            feature_vec, scores, gan_vec = G(images)

            for param in G.parameters():
                param.requires_grad = False
            for param in D.parameters():
                param.requires_grad = True

            for _ in range(STEP):
                discriminator_optimizer.zero_grad()

                pred_domain = D(
                    [v.detach()
                     for v in gan_vec] if isinstance(gan_vec, list) else
                    gan_vec.detach())  # NOTE: Feat output! Not Probability!

                d_losses, accs = discriminator_loss(pred_domain,
                                                    domains,
                                                    compute_acc=True)
                d_source_loss = d_losses.mean()
                d_source_acc = accs.float().mean().item()
                d_loss = d_source_loss

                w_d_loss = anneal * d_loss * gan_d_param

                w_d_loss.backward()
                discriminator_optimizer.step()
            """Start Training G"""

            for param in D.parameters():
                param.requires_grad = False
            for param in G.parameters():
                param.requires_grad = True

            generator_optimizer.zero_grad()

            g_loss = -1. * minus_generator_loss(D(gan_vec), domains).mean()
            class_loss = classification_loss(scores, labels).mean()
            tri_loss = triplet_loss(feature_vec, labels)
            class_loss = class_loss * cfg.SOLVER.LAMBDA1 + tri_loss * cfg.SOLVER.LAMBDA2

            w_regularized_g_loss = anneal * gan_g_param * g_loss + class_param * class_loss

            w_regularized_g_loss.backward()
            generator_optimizer.step()
            """Stop training"""

            running_g_loss += g_loss.item()
            running_source_loss += d_source_loss.item()

            running_acc_source += d_source_acc  # TODO: assume all batches are the same size
            running_class_loss += class_loss.item()

            class_acc = (scores.max(1)[1] == labels).float().mean().item()
            running_class_acc += class_acc

            # writer.add_scalar('D_loss', d_source_loss.item(), batch_count)
            # writer.add_scalar('D_acc', d_source_acc, batch_count)
            # writer.add_scalar('G_loss', g_loss.item(), batch_count)
            # writer.add_scalar('Class_loss', class_loss.item(), batch_count)
            # writer.add_scalar('Class_acc', class_acc, batch_count)

            torch.cuda.empty_cache()
            count = count + 1
            batch_count += 1

            # if count == 10:break

        logger.info(
            "Epoch[{}] Iteration[{}] Loss: [G] {:.3f} [D] {:.3f} [Class] {:.3f}, Acc: [Class] {:.3f} [D] {:.3f}, Base Lr: {:.2e}"
            .format(epoch + base_epo + 1, count, running_g_loss / count,
                    running_source_loss / count, running_class_loss / count,
                    running_class_acc / count, running_acc_source / count,
                    generator_scheduler.get_lr()[0]))

        generator_scheduler.step()
        discriminator_scheduler.step()

        if (epoch + base_epo + 1) % checkpoint_period == 0:
            G.cpu()
            G.save(output_dir, epoch + base_epo + 1)
            D.cpu()
            D.save(output_dir, epoch + base_epo + 1)
            torch.save(
                generator_optimizer.state_dict(),
                os.path.join(output_dir,
                             'G_opt_epo' + str(epoch + base_epo + 1) + '.pth'))
            torch.save(
                discriminator_optimizer.state_dict(),
                os.path.join(output_dir,
                             'D_opt_epo' + str(epoch + base_epo + 1) + '.pth'))
            torch.save(
                generator_scheduler.state_dict(),
                os.path.join(output_dir,
                             'G_sch_epo' + str(epoch + base_epo + 1) + '.pth'))
            torch.save(
                discriminator_scheduler.state_dict(),
                os.path.join(output_dir,
                             'D_sch_epo' + str(epoch + base_epo + 1) + '.pth'))

        # Validation
        if (epoch + base_epo + 1) % eval_period == 0:
            # Validation on Target Dataset
            for target in cfg.DATASETS.TARGET:
                mAPs = []
                cmcs = []
                for i in range(iteration):

                    set_seeds(i)

                    _, val_loader, num_query, _ = data_loader(cfg, (target, ),
                                                              merge=False,
                                                              verbose=False)

                    all_feats = []
                    all_pids = []
                    all_camids = []

                    since = time.time()
                    for data in tqdm(val_loader,
                                     desc='Feature Extraction',
                                     leave=False):
                        G.eval()
                        with torch.no_grad():
                            images, pids, camids = data
                            if device:
                                G.to(device)
                                images = images.to(device)

                            feats = G(images)
                            feats /= feats.norm(dim=-1, keepdim=True)

                        all_feats.append(feats)
                        all_pids.extend(np.asarray(pids))
                        all_camids.extend(np.asarray(camids))

                    cmc, mAP = evaluation(all_feats, all_pids, all_camids,
                                          num_query)
                    mAPs.append(mAP)
                    cmcs.append(cmc)

                mAP = np.mean(np.array(mAPs))
                cmc = np.mean(np.array(cmcs), axis=0)

                mAP_std = np.std(np.array(mAPs))
                cmc_std = np.std(np.array(cmcs), axis=0)

                logger.info("Validation Results: {} - Epoch: {}".format(
                    target, epoch + 1 + base_epo))
                logger.info("mAP: {:.1%} (std: {:.3%})".format(mAP, mAP_std))
                for r in [1, 5, 10]:
                    logger.info(
                        "CMC curve, Rank-{:<3}:{:.1%} (std: {:.3%})".format(
                            r, cmc[r - 1], cmc_std[r - 1]))

        # Record Best
        if (epoch + base_epo + 1) > 60 and ((epoch + base_epo + 1) % 5 == 1 or
                                            (epoch + base_epo + 1) % 5 == 2):
            # Validation on Target Dataset
            R1s = []
            for target in cfg.DATASETS.TARGET:
                mAPs = []
                cmcs = []
                for i in range(iteration):

                    set_seeds(i)

                    _, val_loader, num_query, _ = data_loader(cfg, (target, ),
                                                              merge=False,
                                                              verbose=False)

                    all_feats = []
                    all_pids = []
                    all_camids = []

                    since = time.time()
                    for data in tqdm(val_loader,
                                     desc='Feature Extraction',
                                     leave=False):
                        G.eval()
                        with torch.no_grad():
                            images, pids, camids = data
                            if device:
                                G.to(device)
                                images = images.to(device)

                            feats = G(images)
                            feats /= feats.norm(dim=-1, keepdim=True)

                        all_feats.append(feats)
                        all_pids.extend(np.asarray(pids))
                        all_camids.extend(np.asarray(camids))

                    cmc, mAP = evaluation(all_feats, all_pids, all_camids,
                                          num_query)
                    mAPs.append(mAP)
                    cmcs.append(cmc)

                mAP = np.mean(np.array(mAPs))
                cmc = np.mean(np.array(cmcs), axis=0)
                R1 = cmc[0]
                R1s.append(R1)

            if (np.array(R1s) > np.array(Best_R1s)).all():
                logger.info("Best checkpoint at {}: {}".format(
                    str(epoch + base_epo + 1),
                    ', '.join([str(s) for s in R1s])))
                Best_R1s = R1s
                G.cpu()
                G.save(output_dir, -1)
                D.cpu()
                D.save(output_dir, -1)
                torch.save(
                    generator_optimizer.state_dict(),
                    os.path.join(output_dir, 'G_opt_epo' + str(-1) + '.pth'))
                torch.save(
                    discriminator_optimizer.state_dict(),
                    os.path.join(output_dir, 'D_opt_epo' + str(-1) + '.pth'))
                torch.save(
                    generator_scheduler.state_dict(),
                    os.path.join(output_dir, 'G_sch_epo' + str(-1) + '.pth'))
                torch.save(
                    discriminator_scheduler.state_dict(),
                    os.path.join(output_dir, 'D_sch_epo' + str(-1) + '.pth'))
            else:
                logger.info("Rank 1 results: {}".format(', '.join(
                    [str(s) for s in R1s])))

    time_elapsed = time.time() - since
    logger.info('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    logger.info('-' * 10)
Example #20
import logger
gmaps_logger = logger.make_logger("gmaps_logger", "Gmaps_logger.log")
import requests
try:
    import credentials
except (ModuleNotFoundError):
    gmaps_logger.debug("Wrapper startup - no credentials file")
import logging
import datetime
import json


#wrapper for making requests to the google maps developer api
#can pass the api key in a "credentials.py" file or as a value
class Gmaps():
    def __init__(self, key=None):
        if not key:
            # checks whether a key was passed via a credentials file or as a value
            # if it wasn't, raise an error
            try:
                self.api_key = credentials.api_key
            except (NameError):
                gmaps_logger.debug(
                    "Authontication error - No credentials file and no key passed"
                )
                raise
        else:
            self.api_key = key

    # return all places in a circle of the given radius around the center coordinate; optionally restrict the results to a single place type
    def get_places_nearby(self,
Example #21
#!/home/kyle/anaconda3/bin/python
import zmq
from MsgID import gen_id

import names
import time
import random
import logger
import errno
import tkinter as tk

NBR_DEVS = names.NBR_DEVS

app_log = logger.make_logger('broker.log')
"""
mail_table is a dictionary
the key is a uuid generated by uuid.uuid4(), used to uniquely identify messages
the value for each key is a tuple (from_addr, to_addr, msg)

devs is a python set which contains byte-string representations of names
"""


class Broker():
    def __init__(self):
        self.logger = logger.make_logger('broker.log')
        self.ctx = zmq.Context.instance()
        self.mailbox = make_socket(self.ctx)
        self.devs = set()
        self.poller = zmq.Poller()
        self.top = tk.Tk()
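
For illustration, what one mail_table entry and the devs set look like according to the docstring above (the addresses and payload are made up):

import uuid

devs = {b'pump', b'valve'}                       # byte-string device names
mail_table = {
    uuid.uuid4(): (b'pump', b'valve', b'OPEN'),  # (from_addr, to_addr, msg)
}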
Example #22
def test_random_datasets(config_file,
                         iteration=10,
                         model_type="generalizer",
                         **kwargs):

    if model_type == "normal":
        from config.default_multi_domain import _C as cfg
    elif model_type == "generalizer":
        from config.default_multi_domain import _C as cfg
    else:
        raise ValueError("Model type can only be normal or generalizer.")

    cfg.merge_from_file(config_file)
    if kwargs:
        opts = []
        for k, v in kwargs.items():
            opts.append(k)
            opts.append(v)
        cfg.merge_from_list(opts)
    cfg.freeze()

    # PersonReID_Dataset_Downloader('./datasets',cfg.DATASETS.NAMES)
    _, _, _, num_classes = data_loader(cfg,
                                       cfg.DATASETS.SOURCE,
                                       merge=cfg.DATASETS.MERGE)

    re_ranking = cfg.RE_RANKING

    device = torch.device(cfg.DEVICE)

    if model_type == "generalizer":
        module = getattr(generalizers, cfg.MODEL.NAME)
        model = getattr(module,
                        'Generalizer_G')(num_classes, cfg.MODEL.LAST_STRIDE,
                                         cfg.MODEL.POOL)
        checkpoints = get_last_stats(cfg.OUTPUT_DIR)
        model_dict = torch.load(checkpoints[str(type(model))])
        model.load_state_dict(model_dict)

    elif model_type == "normal":
        model = getattr(models,
                        cfg.MODEL.NAME)(num_classes, cfg.MODEL.LAST_STRIDE,
                                        cfg.MODEL.POOL)
        checkpoints = get_last_stats(cfg.OUTPUT_DIR, [cfg.MODEL.NAME])
        model_dict = torch.load(checkpoints[cfg.MODEL.NAME])
        model.load_state_dict(model_dict)

    model = model.eval()

    if not re_ranking:
        logger = make_logger("Reid_Baseline", cfg.OUTPUT_DIR,
                             'epo' + str(checkpoints['epo']))
        logger.info("Test Results:")
    else:
        logger = make_logger("Reid_Baseline", cfg.OUTPUT_DIR,
                             'epo' + str(checkpoints['epo']) + '_re-ranking')
        logger.info("Re-Ranking Test Results:")

    for test_dataset in cfg.DATASETS.TARGET:
        mAPs = []
        cmcs = []
        for i in range(iteration):

            set_seeds(i)

            _, val_loader, num_query, _ = data_loader(cfg, (test_dataset, ),
                                                      merge=False)

            all_feats = []
            all_pids = []
            all_camids = []

            since = time.time()
            for data in tqdm(val_loader,
                             desc='Feature Extraction',
                             leave=False):
                model.eval()
                with torch.no_grad():
                    images, pids, camids = data
                    if device:
                        model.to(device)
                        images = images.to(device)

                    feats = model(images)
                    feats /= feats.norm(dim=-1, keepdim=True)

                all_feats.append(feats)
                all_pids.extend(np.asarray(pids))
                all_camids.extend(np.asarray(camids))

            cmc, mAP = evaluation(all_feats, all_pids, all_camids, num_query,
                                  re_ranking)
            mAPs.append(mAP)
            cmcs.append(cmc)

        mAP = np.mean(np.array(mAPs))
        cmc = np.mean(np.array(cmcs), axis=0)

        mAP_std = np.std(np.array(mAPs))
        cmc_std = np.std(np.array(cmcs), axis=0)

        logger.info("mAP: {:.1%} (std: {:.3%})".format(mAP, mAP_std))
        for r in [1, 5, 10]:
            logger.info("CMC curve, Rank-{:<3}:{:.1%} (std: {:.3%})".format(
                r, cmc[r - 1], cmc_std[r - 1]))

    test_time = time.time() - since
    logger.info('Testing complete in {:.0f}m {:.0f}s'.format(
        test_time // 60, test_time % 60))
Example #23
from ..apis.xiaomiGetter import ResourceResponse
from timeutils.time import date_range_iter
from timeutils.time import str_to_datetime
from timeutils.time import timestamp_setdigits
from .tokenManager import TokenManager
from .dataType import Device
from .dataType import Location
from .dataType import Spot
from .dataType import SpotData
from .dataType import SpotRecord
from .dataType import WrongDidException
from .dataType import device_check
from .dataType import DataSource
from .dataType import RecordThunkIter

logger = make_logger('dataMidware', 'dataGetter_log')
logger.propagate = False


"""
some of the device models are ignored, largely because they are
just gateways that gather data from their child devices.
"""
deviceModels: Dict = {
    'lumi.acpartner.v3': ["on_off_status", "cost_energy"],  # AC
    'lumi.gateway.aq1': [],                                 # ignore
    'lumi.plug.v1': ["plug_status", "cost_energy"],         # AC
    'lumi.sensor_ht.v1': ["humitidy_value", "temperature_value"],
    'lumi.sensor_magnet.v2': ["magnet_status"],
    'lumi.sensor_motion.v2': []                             # ignore
}
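
A small sketch of how such a table might be consulted; attributes_for is a hypothetical helper, not part of the module above:

def attributes_for(model: str):
    """Return the attributes to collect for a device model, or None for ignored gateways."""
    attrs = deviceModels.get(model, [])
    return attrs or None

attributes_for('lumi.sensor_ht.v1')   # ['humitidy_value', 'temperature_value']
attributes_for('lumi.gateway.aq1')    # None (gateway, ignored)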
Example #24
def train(config_file, **kwargs):
    # 1. config
    cfg.merge_from_file(config_file)
    if kwargs:
        opts = []
        for k, v in kwargs.items():
            opts.append(k)
            opts.append(v)
        cfg.merge_from_list(opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)

    logger = make_logger("Reid_Baseline", output_dir, 'log')
    logger.info("Using {} GPUS".format(1))
    logger.info("Loaded configuration file {}".format(config_file))
    logger.info("Running with config:\n{}".format(cfg))

    checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD
    eval_period = cfg.SOLVER.EVAL_PERIOD
    device = torch.device(cfg.DEVICE)
    epochs = cfg.SOLVER.MAX_EPOCHS

    # 2. datasets
    # Load the original dataset
    dataset_reference = init_dataset(cfg, cfg.DATASETS.NAMES +
                                     '_origin')  #'Market1501_origin'
    train_set_reference = ImageDataset(dataset_reference.train,
                                       train_transforms)
    train_loader_reference = DataLoader(train_set_reference,
                                        batch_size=128,
                                        shuffle=False,
                                        num_workers=cfg.DATALOADER.NUM_WORKERS,
                                        collate_fn=train_collate_fn)

    # Load the one-shot dataset
    train_loader, val_loader, num_query, num_classes = data_loader(
        cfg, cfg.DATASETS.NAMES)

    # 3. load the model and optimizer
    model = getattr(models, cfg.MODEL.NAME)(num_classes)
    optimizer = make_optimizer(cfg, model)
    scheduler = make_scheduler(cfg, optimizer)
    loss_fn = make_loss(cfg)
    logger.info("Start training")
    since = time.time()

    top = 0  # the choice of the nearest sample
    top_update = 0  # the first iteration trains 80 steps; the following ones train 40

    # 4. Train and test
    for epoch in range(epochs):
        running_loss = 0.0
        running_acc = 0
        count = 1

        # get nearest samples and reset the model
        if top_update < 80:
            train_step = 80
        else:
            train_step = 40
        if top_update % train_step == 0:
            print("top: ", top)
            A, path_labeled = PSP(model, train_loader_reference, train_loader,
                                  top, cfg)
            top += cfg.DATALOADER.NUM_JUMP
            model = getattr(models, cfg.MODEL.NAME)(num_classes)
            optimizer = make_optimizer(cfg, model)
            scheduler = make_scheduler(cfg, optimizer)
            A_store = A.clone()
        top_update += 1

        for data in tqdm(train_loader, desc='Iteration', leave=False):
            model.train()
            images, labels_batch, img_path = data
            index, index_labeled = find_index_by_path(img_path,
                                                      dataset_reference.train,
                                                      path_labeled)
            images_relevant, GCN_index, choose_from_nodes, labels = load_relevant(
                cfg, dataset_reference.train, index, A_store, labels_batch,
                index_labeled)
            # if device:
            model.to(device)
            images = images_relevant.to(device)

            scores, feat = model(images)
            del images
            loss = loss_fn(scores, feat, labels.to(device), choose_from_nodes)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            count = count + 1
            running_loss += loss.item()
            running_acc += (scores[choose_from_nodes].max(1)[1].cpu() ==
                            labels_batch).float().mean().item()

        scheduler.step()

        # for model save if you need
        # if (epoch+1) % checkpoint_period == 0:
        #     model.cpu()
        #     model.save(output_dir,epoch+1)

        # Validation
        if (epoch + 1) % eval_period == 0:
            all_feats = []
            all_pids = []
            all_camids = []
            for data in tqdm(val_loader,
                             desc='Feature Extraction',
                             leave=False):
                model.eval()
                with torch.no_grad():
                    images, pids, camids = data

                    model.to(device)
                    images = images.to(device)

                    feats = model(images)
                    del images
                all_feats.append(feats.cpu())
                all_pids.extend(np.asarray(pids))
                all_camids.extend(np.asarray(camids))

            cmc, mAP = evaluation(all_feats, all_pids, all_camids, num_query)
            logger.info("Validation Results - Epoch: {}".format(epoch + 1))
            logger.info("mAP: {:.1%}".format(mAP))
            for r in [1, 5, 10, 20]:
                logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(
                    r, cmc[r - 1]))

    time_elapsed = time.time() - since
    logger.info('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    logger.info('-' * 10)
Example #25
#!/usr/bin/python

import os
import psycopg2
import json
import time
from filters_json import filter_list as FilterMap
from logger import make_logger
import inspect

logger = make_logger(inspect.stack()[0][1], 'retoracle.log')


QUERY_STRINGS = {}
DB_CONFIG = {}

NUM_TWEETS_NEED_SA = """\
SELECT COUNT(DISTINCT tweet_id)
FROM
(SELECT tweet_id FROM tweets
EXCEPT
SELECT tweet_id FROM tweet_sent) as intrsct;
"""

GET_TWEET_BATCH_NEED_SA = """SELECT tweet_id, tweet_text
FROM tweets
WHERE tweet_id
IN
(SELECT tweet_id FROM tweets EXCEPT select tweet_id FROM tweet_sent)
LIMIT %s;
"""
Example #26
import Gmaps
import logger
import time
import json

circle_logger = logger.make_logger("circle_logger", "Circle_logger.log")


# add types when finished!
class Circle():
    def __init__(self, center, radius, type=None):
        self.center = center
        self.radius = radius
        self.type = type

        #try to create a gmaps object - if failed raise a value error

        try:
            self.Gmaps = Gmaps.Gmaps()
        except (NameError):
            raise (ValueError(
                "Gmaps authentication error - no key or credentials file"))

        #attempt to find all places in circle

        self.my_places = self.find_all_places_in_circle()
        #self.my_places = self.my_places["results"]

        #if function returns string - return the error

        if isinstance(self.my_places, dict):
Example #27
# import sys
import json
import time
import sys
import boto
import sql_queries as sql_q
from boto.s3.key import Key
import boto.emr
from boto.emr.step import StreamingStep
import SA_Mapper
from SentimentAnalysis import agg_sent
from logger import make_logger
import inspect

logger = make_logger(inspect.stack()[0][1], 'retoracle.log')

# At most the worker will check for more Tweets to
# analyze every MIN_EXECUTION_PERIOD seconds
MIN_EXECUTION_PERIOD = 5.0

# If the number of Tweets to analyze is > EMR_THRESHOLD
# Then this worker will use Hadoop to do SA
# But only is ALLOW_EMR is also True
EMR_THRESHOLD = 1000

# When creating a Hadoop job, whats the max batch size to aim for?
MAX_BATCH_SIZE = 5000

# Allow this worker to spin up EMR (Amazon's Hadoop)
# jobs for up to BATCH_SIZE batches of Tweets that need SA?
Example #28
def train(opt):
    train_dataset = VOCDetection(transform=SSDAugmentation(
        opt.DATASETS.MIN_DIM, opt.DATASETS.MEANS),
                                 opt=opt)

    test_dataset = VOCDetection(['test'],
                                BaseTransform(300, opt.DATASETS.MEANS),
                                VOCAnnotationTransform(),
                                opt=opt)

    ssd_net = build_ssd('train', opt.DATASETS.MIN_DIM, opt.DATASETS.NUM_CLS)
    net = ssd_net
    # logger
    logger = make_logger("project", opt.OUTPUT_DIR, 'log')

    if len(opt.DEVICE_ID) > 1:
        net = torch.nn.DataParallel(ssd_net)
        cudnn.benchmark = True

    if opt.MODEL.RESUM:
        print('Resuming training, loading {}...'.format(opt.MODEL.RESUM))
        ssd_net.load_weights(opt.MODEL.RESUM)
    else:
        vgg_weights = torch.load(
            os.path.join(opt.MODEL.BACKBONE_WEIGHTS, opt.MODEL.BACKBONE))
        print('Loading base network...')
        ssd_net.vgg.load_state_dict(vgg_weights)

    if opt.DEVICE:
        net = net.cuda()

    if not opt.MODEL.RESUM:
        print('Initializing backbone_weights...')
        # initialize newly added layers' backbone_weights with xavier method
        ssd_net.extras.apply(weights_init)
        ssd_net.loc.apply(weights_init)
        ssd_net.conf.apply(weights_init)

    optimizer = optim.SGD(net.parameters(),
                          lr=opt.SOLVER.BASE_LR,
                          momentum=opt.SOLVER.MOMENTUM,
                          weight_decay=opt.SOLVER.WEIGHT_DECAY)
    criterion = MultiBoxLoss(opt.DATASETS.NUM_CLS, 0.5, True, 0, True, 3, 0.5,
                             False, opt.DEVICE)
    epoch_size = len(train_dataset) // opt.DATALOADER.BATCH_SIZE

    train_loader = data.DataLoader(train_dataset,
                                   batch_size=opt.DATALOADER.BATCH_SIZE,
                                   num_workers=opt.DATALOADER.NUM_WORKERS,
                                   shuffle=True,
                                   collate_fn=detection_collate,
                                   pin_memory=True)
    # device = torch.device("cuda")
    trainer = Trainer(net,
                      optimizer,
                      criterion,
                      logger,
                      device=None,
                      scheduler=None)
    trainer.run(
        opt=opt,
        train_loader=train_loader,  # dataloader
        test_dataset=test_dataset,  # dataset
        epoch_size=epoch_size)
Example #29
import requests
from hashlib import md5, sha1
from typing import Dict, Optional, Tuple, TypedDict, List, cast
from operator import itemgetter
import urllib3
import http
import urllib.parse
from datetime import datetime as dt
from timeutils.time import currentTimestamp
import json

from .exceptions import connection_exception
from logger import make_logger

logger = make_logger('xiaomiGetter', 'dataGetter_log')


"""
information for authentication, used for establishing the connection
with xiaomi platform.
"""
AuthData = (
    TypedDict(
        'AuthData',
        {
            'account': str,
            'password': str,
            'appId': str,            # called client_id in json request.
            # called client_secret in json request.
            'appKey': str,
Example #30
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()

from flask_login import LoginManager
login_manager = LoginManager()
login_manager.login_view = 'auth.login'

from .caching import CacheInstance
global_cache = CacheInstance()  # create cache instance here.

if db is not None:
    # Scheduler depends on db.
    from .dataGetter.dataloader.Scheduler import UpdateScheduler
    scheduler = UpdateScheduler()

logger = make_logger('app', 'app_log', logging.DEBUG)
logger.warning('initializing app')


def create_app(config_name: str, with_scheduler: bool = True) -> Flask:
    """ application factory function."""
    app = Flask(__name__)

    # load config
    app.config.from_object(config[config_name])
    config[config_name].init_app(app)

    moment.init_app(app)
    db.init_app(app)
    login_manager.init_app(app)
    if with_scheduler:
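
A usage sketch of the factory, assuming a 'development' key exists in the config mapping used above:

app = create_app('development', with_scheduler=False)

if __name__ == '__main__':
    app.run(debug=True)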
Example #31
import CurrentState as cs
import CurrentThreshold as ct
import logger
import XMLSerializers.XMLAdapter as xa
import Alert as a
from datetime import datetime as dt

log = logger.make_logger('main', "BatchCheck.log", r"E:\BatchMon\Check", 10).log

def make_FI_list(dbs_obj):
    FI_list = [row.attrs['source_DBName'].upper() for row in dbs_obj.row
                if row.attrs['source_DBName'] not in ('TestDB2', 'TestDB3')
                    and row.attrs['source_is_read_only'] == '0']
    return list(set(FI_list))


def process_batchCheck():
    width = 3.7
    log(20, 'Starting process_batchCheck, Config: width:{}'.format(width))
    with xa.XMLAdapter() as xa_obj:
        with open(r'E:\BatchMon\Alert\{}-alert.txt'.format(dt.now().strftime('%Y-%m-%dT%H%M%S')), 'w') as _f:
            dbs_obj = xa_obj.get_dbs_obj()
            FI_list = make_FI_list(dbs_obj)
            log(20, 'xa_obj.get_FI_list: FI_list:{}, dbs_obj:{}'.format(FI_list, dbs_obj))
            for dbName in FI_list:
                log(30, '------xxxxxxXXXXXX NEW FI XXXXXXxxxxxx---------')
                log(20, 'Current dbName:{}'.format(dbName))
                bth_obj = xa_obj.get_bth_obj(dbName)
                log(20, 'Get Batch Threshold Object(bth_obj) from DataBase for {}, bth_obj:{}'.format(dbName, bth_obj))
                threshold_obj = ct.CurrentThreshold(width, bth_obj)
                threshold_obj.process_bth_obj()
Example #32
        logger.info(f'google search successful: {url} {query}')
        return urls

    except HTTPError as e:
        logger.info(f'{e} at backoff {backoff}')
        return google_search(url, query, stop, backoff=backoff+1)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--newspapers', default=["all", ], nargs='*')
    parser.add_argument('--num', default=10, nargs='?', type=int)
    parser.add_argument('--source', default='google', nargs='?', type=str)
    args = parser.parse_args()

    logger = make_logger('logger.log')
    logger.info(args)
    url_source = args.source

    #  get newspaper info
    newspapers = args.newspapers
    if newspapers == ['all', ]:
        newspapers = registry
    else:
        newspapers = [n for n in registry if n['newspaper_id'] in newspapers]
    print(newspapers)

    for newspaper in newspapers:
        #  collect_urls
        parser = newspaper['parser']
        checker = newspaper['checker']
Example #33
from .caching.caching import get_cache
from .caching.global_cache import GlobalCache
from .caching.global_cache import ModelDataEnum
from .models import ClimateArea
from .models import Company
from .models import Device
from .models import Location
from .models import OutdoorRecord
from .models import OutdoorSpot
from .models import Project
from .models import ProjectDetail
from .models import Spot
from .models import SpotRecord
from .models import User

logger = make_logger('modelOperation', 'modelOperation_log', DEBUG)
logger.propagate = False

T = TypeVar('T')
# from app import global_cache
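# Import the app package at runtime instead of the direct import above,
# presumably to avoid a circular import, then reuse its shared cache instance.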
app = importlib.import_module('app')
global_cache = app.global_cache


# TODO lazy load global_cache.global_cacheall so it is fully initialized.
PostData = Dict


@global_cache.global_cacheall
def cacher_test(cache):
    print(cache)
Example #34
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

try:
    import logger
    try:
        import config
        colored = getattr(config, 'log_colored', False)
    except ImportError:
        colored = False
    logger = logger.make_logger('pserver', debug=False, colored=colored)
except ImportError:
    import logging as logger

__author__ = "Robert Zaremba"
__version__ = version = "0.1"
__license__ = "Apache License v2"

from base import PServer
from protocols import *

Example #35
def train(config_file, resume=False, **kwargs):
    cfg.merge_from_file(config_file)
    if kwargs:
        opts = []
        for k, v in kwargs.items():
            opts.append(k)
            opts.append(v)
        cfg.merge_from_list(opts)
    cfg.freeze()

    # [PersonReID_Dataset_Downloader(cfg.DATASETS.STORE_DIR,dataset) for dataset in cfg.DATASETS.SOURCE]
    # [PersonReID_Dataset_Downloader(cfg.DATASETS.STORE_DIR,dataset) for dataset in cfg.DATASETS.TARGET]
    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)

    logger = make_logger("Reid_Baseline", output_dir, 'log', resume)
    if not resume:
        logger.info("Using {} GPUS".format(1))
        logger.info("Loaded configuration file {}".format(config_file))
        logger.info("Running with config:\n{}".format(cfg))

    checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD
    eval_period = cfg.SOLVER.EVAL_PERIOD
    output_dir = cfg.OUTPUT_DIR
    device = torch.device(cfg.DEVICE)
    epochs = cfg.SOLVER.MAX_EPOCHS

    train_loader, _, _, num_classes = data_loader(cfg,
                                                  cfg.DATASETS.SOURCE,
                                                  merge=cfg.DATASETS.MERGE)

    model = getattr(models, cfg.MODEL.NAME)(num_classes, cfg.MODEL.LAST_STRIDE,
                                            cfg.MODEL.POOL)
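    # When resuming, restore the latest model weights first; the optimizer and
    # scheduler states are reloaded further below, after they are created.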
    if resume:
        checkpoints = get_last_stats(output_dir)
        try:
            model_dict = torch.load(checkpoints[cfg.MODEL.NAME])
        except KeyError:
            model_dict = torch.load(checkpoints[str(type(model))])
        model.load_state_dict(model_dict)
        if device:
            model.to(device)  # must be done before the optimizer generation
    optimizer = make_optimizer(cfg, model)
    scheduler = make_scheduler(cfg, optimizer)
    base_epo = 0
    if resume:
        optimizer.load_state_dict(torch.load(checkpoints['opt']))
        sch_dict = torch.load(checkpoints['sch'])
        scheduler.load_state_dict(sch_dict)
        base_epo = checkpoints['epo']

    loss_fn = make_loss(cfg)

    if not resume:
        logger.info("Start training")
    since = time.time()
    for epoch in range(epochs):
        count = 0
        running_loss = 0.0
        running_acc = 0
        for data in tqdm(train_loader, desc='Iteration', leave=False):
            model.train()
            images, labels, domains = data
            if device:
                model.to(device)
                images, labels, domains = images.to(device), labels.to(
                    device), domains.to(device)

            optimizer.zero_grad()

            scores, feats = model(images)
            loss = loss_fn(scores, feats, labels)

            loss.backward()
            optimizer.step()

            count = count + 1
            running_loss += loss.item()
            running_acc += (
                scores[0].max(1)[1] == labels).float().mean().item()

        logger.info(
            "Epoch[{}] Iteration[{}/{}] Loss: {:.3f}, Acc: {:.3f}, Base Lr: {:.2e}"
            .format(epoch + 1 + base_epo, count, len(train_loader),
                    running_loss / count, running_acc / count,
                    scheduler.get_lr()[0]))
        scheduler.step()

        if (epoch + 1 + base_epo) % checkpoint_period == 0:
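            # Save the model together with the optimizer and scheduler states
            # so training can be resumed later.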
            model.cpu()
            model.save(output_dir, epoch + 1 + base_epo)
            torch.save(
                optimizer.state_dict(),
                os.path.join(output_dir,
                             'opt_epo' + str(epoch + 1 + base_epo) + '.pth'))
            torch.save(
                scheduler.state_dict(),
                os.path.join(output_dir,
                             'sch_epo' + str(epoch + 1 + base_epo) + '.pth'))

        # Validation
        if (epoch + base_epo + 1) % eval_period == 0:
            # Validation on Target Dataset
            for target in cfg.DATASETS.TARGET:
                mAPs = []
                cmcs = []
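                # Run the evaluation several times with different seeds and
                # report the mean and standard deviation of mAP/CMC.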
                for i in range(iteration):

                    set_seeds(i)

                    _, val_loader, num_query, _ = data_loader(cfg, (target, ),
                                                              merge=False)

                    all_feats = []
                    all_pids = []
                    all_camids = []

                    for data in tqdm(val_loader,
                                     desc='Feature Extraction',
                                     leave=False):
                        model.eval()
                        with torch.no_grad():
                            images, pids, camids = data
                            if device:
                                model.to(device)
                                images = images.to(device)

                            feats = model(images)
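                            # L2-normalize the features so the evaluation
                            # effectively uses cosine distance.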
                            feats /= feats.norm(dim=-1, keepdim=True)

                        all_feats.append(feats)
                        all_pids.extend(np.asarray(pids))
                        all_camids.extend(np.asarray(camids))

                    cmc, mAP = evaluation(all_feats, all_pids, all_camids,
                                          num_query)
                    mAPs.append(mAP)
                    cmcs.append(cmc)

                mAP = np.mean(np.array(mAPs))
                cmc = np.mean(np.array(cmcs), axis=0)

                mAP_std = np.std(np.array(mAPs))
                cmc_std = np.std(np.array(cmcs), axis=0)

                logger.info("Validation Results: {} - Epoch: {}".format(
                    target, epoch + 1 + base_epo))
                logger.info("mAP: {:.1%} (std: {:.3%})".format(mAP, mAP_std))
                for r in [1, 5, 10]:
                    logger.info(
                        "CMC curve, Rank-{:<3}:{:.1%} (std: {:.3%})".format(
                            r, cmc[r - 1], cmc_std[r - 1]))

            reset()

    time_elapsed = time.time() - since
    logger.info('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    logger.info('-' * 10)
Example #36
import json
import sys
import os
import sql_queries as sql_q
from filters_json import filter_list
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
from logger import make_logger


req_tok_url = 'https://api.twitter.com/oauth/request_token'
oauth_url = 'https://api.twitter.com/oauth/authorize'
acc_tok_url = 'https://api.twitter.com/oauth/access_token'

logger = make_logger('push_db', 'retoracle.log')


def return_filters():
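    # Flatten the nested search_terms mapping into a single list of keywords.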
    filter_l = []
    for i in filters:
        for y in filters[i]['search_terms']:
            for x in filters[i]['search_terms'][y]:
                filter_l.append(x)
    return filter_l


def return_blacklist():
    blackList = []
    for i in filters:
        for y in filters[i]['blacklist']:
Example #37
import time, datetime
import json
import sys
import os
import sql_queries as sql_q
from filters_json import filter_list
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
from logger import make_logger

req_tok_url = 'https://api.twitter.com/oauth/request_token'
oauth_url = 'https://api.twitter.com/oauth/authorize'
acc_tok_url = 'https://api.twitter.com/oauth/access_token'

logger = make_logger('push_db', 'retoracle.log')


def return_filters():
    filter_l = []
    for i in filters:
        for y in filters[i]['search_terms']:
            for x in filters[i]['search_terms'][y]:
                filter_l.append(x)
    return filter_l


def return_blacklist():
    blackList = []
    for i in filters:
        for y in filters[i]['blacklist']:
Example #38
File: train.py  Project: manutdzou/ReID
def train(config_file, **kwargs):
    cfg.merge_from_file(config_file)
    if kwargs:
        opts = []
        for k, v in kwargs.items():
            opts.append(k)
            opts.append(v)
        cfg.merge_from_list(opts)
    cfg.freeze()

    #PersonReID_Dataset_Downloader('./datasets',cfg.DATASETS.NAMES)

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)

    logger = make_logger("Reid_Baseline", output_dir, 'log')
    logger.info("Using {} GPUS".format(1))
    logger.info("Loaded configuration file {}".format(config_file))
    logger.info("Running with config:\n{}".format(cfg))

    checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD
    eval_period = cfg.SOLVER.EVAL_PERIOD
    output_dir = cfg.OUTPUT_DIR
    device = torch.device(cfg.DEVICE)
    epochs = cfg.SOLVER.MAX_EPOCHS
    method = cfg.DATALOADER.SAMPLER

    train_loader, val_loader, num_query, num_classes = data_loader(
        cfg, cfg.DATASETS.NAMES)

    model = getattr(models, cfg.MODEL.NAME)(num_classes, cfg.MODEL.LAST_STRIDE)

    if 'center' in method:
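        # Center-loss setup: a separate criterion holds the class centers and
        # gets its own optimizer.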
        loss_fn, center_criterion = make_loss(cfg)
        optimizer, optimizer_center = make_optimizer_with_center(
            cfg, model, center_criterion)
    else:
        loss_fn = make_loss(cfg)
        optimizer = make_optimizer(cfg, model)

    scheduler = make_scheduler(cfg, optimizer)

    logger.info("Start training")
    since = time.time()
    for epoch in range(epochs):
        count = 0
        running_loss = 0.0
        running_acc = 0
        for data in tqdm(train_loader, desc='Iteration', leave=False):
            model.train()
            images, labels = data
            if device:
                model.to(device)
                images, labels = images.to(device), labels.to(device)

            optimizer.zero_grad()
            if 'center' in method:
                optimizer_center.zero_grad()

            scores, feats = model(images)
            loss = loss_fn(scores, feats, labels)

            loss.backward()
            optimizer.step()
            if 'center' in method:
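                # Remove the center-loss weight from the center parameters'
                # gradients so the centers are updated as if the loss were
                # unweighted.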
                for param in center_criterion.parameters():
                    param.grad.data *= (1. / cfg.SOLVER.CENTER_LOSS_WEIGHT)
                optimizer_center.step()

            count = count + 1
            running_loss += loss.item()
            running_acc += (scores.max(1)[1] == labels).float().mean().item()

        logger.info(
            "Epoch[{}] Iteration[{}/{}] Loss: {:.3f}, Acc: {:.3f}, Base Lr: {:.2e}"
            .format(epoch + 1, count, len(train_loader), running_loss / count,
                    running_acc / count,
                    scheduler.get_lr()[0]))
        scheduler.step()

        if (epoch + 1) % checkpoint_period == 0:
            model.cpu()
            model.save(output_dir, epoch + 1)

        # Validation
        if (epoch + 1) % eval_period == 0:
            all_feats = []
            all_pids = []
            all_camids = []
            for data in tqdm(val_loader,
                             desc='Feature Extraction',
                             leave=False):
                model.eval()
                with torch.no_grad():
                    images, pids, camids = data
                    if device:
                        model.to(device)
                        images = images.to(device)

                    feats = model(images)

                all_feats.append(feats)
                all_pids.extend(np.asarray(pids))
                all_camids.extend(np.asarray(camids))

            logger.info("start evaluation")
            cmc, mAP = evaluation(all_feats, all_pids, all_camids, num_query)
            logger.info("Validation Results - Epoch: {}".format(epoch + 1))
            logger.info("mAP: {:.1%}".format(mAP))
            for r in [1, 5, 10]:
                logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(
                    r, cmc[r - 1]))

    time_elapsed = time.time() - since
    logger.info('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    logger.info('-' * 10)
Example #39
from pathlib import Path

# Environment Variables
env = os.environ
load_dotenv()

# The time in seconds the bot should sleep until it checks again.
SLEEP = 600

# LOGGING CONFIGURATION
LOG_FILENAME = "bot.log"
LOG_FILE_BACKUPCOUNT = 5
LOG_FILE_MAXSIZE = 1024 * 256

# LOGGING SETUP
log = logger.make_logger("bot", LOG_FILENAME, logging_level=logging.DEBUG)


# MAIN PROCEDURE
def run_bot():
    # -- progmetalbot useragent and version --
    app_useragent_version = env['APP_USERAGENT'] + ' ' + env[
        'APP_VERSION'] + " by u/" + settings.USER_TO_MESSAGE
    # -- praw --
    reddit = praw.Reddit(user_agent=app_useragent_version,
                         client_id=env['REDDIT_CLIENT_ID'],
                         client_secret=env['REDDIT_CLIENT_SECRET'],
                         password=env['REDDIT_PASSWORD'],
                         username=env['REDDIT_USERNAME'])
    subreddit = reddit.subreddit(settings.REDDIT_SUBREDDIT)
    # -- musicbrainz --