def update_best(self, state):
        local_best = state.local_best
        get_logger().debug(
            f'At leaf, local best = {local_best}, global = {self.global_best}')

        if self.global_best is None or local_best > self.global_best:
            self.global_best = local_best
            self.global_best_state = state
Example #2
    def get_next_states(self, visited_states=set()):
        # Note: the mutable default set is shared across calls, so states seen in
        # earlier calls are skipped in later ones as well.
        states = set()
        for edge in self.free_edges.edges:
            next_state = copy.deepcopy(self)
            next_state.add_include_edge(*edge)
            if next_state in visited_states:
                continue
            next_state.propagate_constraints(edge)
            get_logger().debug(f'Added next state {next_state}')
            states.add(next_state)
            visited_states.add(next_state)
        return states
Example #3
    def __init__(
        self,
        ids: Optional[list] = None,
        bbox: Optional[BoundingBox] = None,
        intersects: Optional[Geometry] = None,
        contains: Optional[Geometry] = None,
        time: Optional[str] = None,
        limit: Optional[int] = None,
        time_series: Optional[list] = None,
        **kwargs,
    ):  # pylint: disable=too-many-arguments

        if (bbox and intersects) or (intersects and contains) or (contains and bbox):
            raise ValueError("""Only one of the following query parameters is
                allowed in a query at any given time:
                * bbox
                * intersects
                * contains""")

        self.__dict__.update(
            kwargs
        )  # At the front so we do not accidentally overwrite some members
        self.ids = ids
        self.bbox = bbox
        self.intersects = intersects
        self.contains = contains

        if not STACQuery.validate_datetime_str(time):
            raise ValueError("""Time string could not be validated.
                It must be one of the following formats:
                <RFC3339> (for points in time)
                <RFC3339>/<RFC3339> (for datetime ranges)""")
        self.time = time

        self.limit = 1 if limit is None else limit
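        # Negative limits are invalid; they are logged and reset to 1 below.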
        if limit is not None and limit < 0:
            helper.get_logger(__name__).warning(
                "WARNING: limit parameter cannot be < 0, and has been automatically set to 1"
            )
            self.limit = 1

        if time_series is not None:
            for datestr in time_series:
                if not STACQuery.validate_datetime_str(datestr):
                    raise ValueError(
                        """Time string from time_series could not be validated.
                        It must be one of the following formats:
                        <RFC3339> (for points in time)
                        <RFC3339>/<RFC3339> (for datetime ranges)""")
        self.time_series = time_series
Example #4
    def run(self):
        logger = get_logger('SessionMgr')
        threading_lock_interval_counter = 0
        while True:
            threading_lock_interval_counter += 1
            time.sleep(self.polling_interval)
            # Iterate over a snapshot of the keys so entries can be deleted safely.
            for k in list(self.data_set.keys()):
                v = self.data_set[k]
                status = v["status"]
                now = time.time()

                if (now - v['timer']) > self.absolute_timeout:
                    try:
                        channel = v["channel"]
                        if channel in ("cli", "snmp"):
                            timestamp = time.strftime('%Y-%m-%d %H:%M:%S')
                            device_id = v["device_info"]["device_id"]
                            self.pedding_fuc(channel, device_id, now, timestamp)
                        del self.data_set[k]
                        logger.info('Task %s Timeout, cleared' % k)
                    except Exception as e:
                        logger.error(str(e))

                elif "finish_timer" in v and (now - v['finish_timer']) > self.after_finish_timeout:
                    del self.data_set[k]
                    logger.info('Task %s Finished, cleared' % k)
Example #5
def add_msg():
    logger = get_logger('logs/add_msg.log')
    logger.info("run add msg.")

    # touser = "******"
    touser = "******"

    _send_hour = []
    _send_day = 0

    while True:
        now = datetime.datetime.now()
        day = now.day
        hour = now.hour
        if _send_day != day: _send_hour = []
        if hour not in _send_hour:
            _send_day = day
            content = get_msg_content()
            _send_hour.append(hour)
            if content:
                msg = {"to": touser, "msg": content}
                logger.debug('msg: {0}'.format(content.encode('utf8')))
                logger.info("will send '{0}' to '{1}'".format(
                    content.encode('utf8'), touser))
                jobqe.add_job(settings.msg_queue_name, msg)
        time.sleep(random.randint(1, 10) * 60)
Example #6
def initialize():
    """
    Setup neural networks and tensorboard logging.
    """

    discriminator = Discriminator().to(config.device)
    policy = generator.Policy().to(config.device)
    rollout = generator.Policy().to(config.device)

    discriminator.criterion = torch.nn.BCELoss()
    discriminator.optimizer = torch.optim.Adam(discriminator.parameters(),
                                               lr=config.d_learnrate)
    policy.optimizer = torch.optim.Adam(policy.parameters(),
                                        lr=config.g_learnrate)

    hyperparameter = {
        k: v
        for k, v in config.__dict__.items() if v != config.device
    }

    notes = ''

    store.setup(loader.make_directory_with_timestamp(), hyperparameter, notes)
    store.set('Policy Step', 0)
    os.makedirs('{}/policies'.format(store.folder))

    log = get_logger(store.folder, 'errors')

    return discriminator, policy, rollout, log
Example #7
    def start_tests(self):
        test_num = 1
        for test in self.__test_list:
            logger = get_logger(test_num)
            list_flows = []
            test_obj = Tests(self.__w3, logger, self.__inspector)
            try:
                func_args = test["args"]

                if test_obj.is_thread(test["func"]):
                    list_flows = []
                    flows = test["flows"]
                    if flows > len(self.accounts):
                        flows = len(self.accounts)

                    for i in range(flows):
                        j = i + 1
                        if j == len(self.accounts):
                            j = 0
                        accounts = (self.accounts[i][0], self.accounts[i][1],
                                    self.accounts[j][0], self.accounts[j][1])
                        func_args.insert(0, accounts)
                        list_flows.append(
                            Thread(target=test_obj.start_test,
                                   args=(test["func"], func_args.copy())))
                        func_args.pop(0)
                else:
                    list_flows.append(
                        Thread(target=test_obj.start_test,
                               args=(test["func"], func_args.copy())))

                logger.info("Start test {0}(flows: {3}): {1}{2}".format(
                    test_num, test["func"], test["args"], len(list_flows)))

                for flow in list_flows:
                    flow.start()
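                # Busy-wait until every test thread has finished running.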
                while True:
                    list_alive = [
                        state.is_alive() for state in list_flows
                        if state.is_alive()
                    ]
                    if not list_alive:
                        break

            except TypeError as e:
                logger.error("\tUnhandled error in starting {2}:{0}{1}".format(
                    e.__class__.__name__, e, test["func"]))

                continue

            finally:
                test_num += 1
                sleep(5)  # Transactions in the same test sometimes do not have time to process on the node
Example #8
    def find_best(self):
        self.start_time = time.time()
        pq = PriorityQueue()
        pq.put((self.original_state.local_best, self.original_state))
        while not pq.empty():
            local_best, current_state = pq.get()
            if self.is_visited(current_state):
                continue
            self.iter_count += 1
            get_logger().debug(f'current_state: {current_state}')
            if current_state.worst_than(self.global_best):
                get_logger().debug(
                    f'current_state is worse than global best: {self.global_best}'
                )
                continue
            next_states = current_state.get_next_states()
            # A state with no successors is a leaf; it becomes a candidate for the global best.
            if len(next_states) == 0:
                self.update_best(current_state)
            for next_s in next_states:
                pq.put((next_s.local_best, next_s))
        self.complete_time = time.time() - self.start_time
        return self.global_best_state
Example #9
    def __init__(self, params):
        """
        Constructor for the main function. Loads data and creates computation graph.

        Parameters
        ----------
        params:        Hyperparameters of the model

        Returns
        -------
        """
        self.p = params

        if not os.path.isdir(self.p.log_dir):
            os.mkdir(self.p.log_dir)
        if not os.path.isdir(self.p.emb_dir):
            os.mkdir(self.p.emb_dir)

        self.logger = hp.get_logger(self.p.name, self.p.log_dir,
                                    self.p.config_dir)

        self.logger.info(vars(self.p))
        pprint(vars(self.p))
        self.p.batch_size = self.p.batch_size

        if self.p.l2 == 0.0:
            self.regularizer = None
        else:
            self.regularizer = tf.contrib.layers.l2_regularizer(
                scale=self.p.l2)

        self.load_data()
        self.add_placeholders()

        nn_out = self.add_model()
        self.loss = self.add_loss_op(nn_out)

        if self.p.opt == 'adam':
            self.train_op = self.add_optimizer(self.loss)
        else:
            self.train_op = self.add_optimizer(self.loss, isAdam=False)

        self.merged_summ = tf.summary.merge_all()
Example #10
def wechat():
    logger = get_logger('logs/wechat.log')

    import time
    import traceback

    import itchat
    from itchat.content import TEXT

    @itchat.msg_register([TEXT])
    def text_reply(msg):
        logger.info('{0}: [{1}]{2}'.format(msg['FromUserName'], msg['Type'],
                                           msg['Text'].encode('utf8')))

    itchat.auto_login(enableCmdQR=True, hotReload=True)
    itchat.run(blockThread=False)

    logger.info("wechat login success.")

    while True:
        time.sleep(10)
        x = jobqe.get_job(settings.msg_queue_name)
        if not isinstance(x, dict): continue
        data = x.get('data')
        if not data or not isinstance(data, dict): continue
        try:
            msg = data.get('Data')
            touser = msg.get('to')
            content = msg.get('msg')
            if not touser or not content: continue
            touser = touser.strip()
            us = itchat.search_friends(name=touser)
            if len(us) == 0:
                logger.info('no such user named: {0}'.format(touser))
                continue
            u = us[0]
            itchat.send(content, toUserName=u["UserName"])
            logger.info('send "{0}" to "{1}".'.format(content.encode('utf8'),
                                                      touser.encode('utf8')))
        except Exception:
            logger.error(traceback.format_exc())
        jobqe.ack_job(data.get('ID'))
Example #11
parser.add_argument('--num_batches',
                    type=int,
                    default=10,
                    help='number of batches to evaluate')
opt = parser.parse_args()
print(opt)

# ----------
#  Cuda or cpu
# ----------
cuda = is_cuda(opt.use_cpu)

# Logging init
if opt.logging:
    print("Init logging...")
    d_eval_logger_original = helper.get_logger(opt.log_port, 'd_eval_original')
    d_eval_logger_cv2 = helper.get_logger(opt.log_port, 'd_eval_cv2')
    d_eval_logger_completed = helper.get_logger(opt.log_port,
                                                'd_eval_completed')
    viz_image_logger = Visdom(port=opt.log_port, env="images")

# Loss function
criteria = torch.nn.BCELoss()

# Load generator and discriminator
generator = Generator(opt.batch_size, opt.latent_dim, opt.channels)
discriminator = Discriminator(opt.batch_size, opt.channels)
for model, model_name in [(generator, 'g'), (discriminator, 'd')]:
    load_model(cuda, model, model_name)

if cuda:
Example #12
# -*- coding=UTF-8 -*-
import argparse
import json
import re

from sqlalchemy import exc, func
from helper import get_logger, get_session, timeit
from models import FBShop, FBShopType
from fb_api import FBBot

__author__ = "Wen-Hao Lee"
__email__ = "*****@*****.**"
__copyright__ = "Copyright 2014, Numnum"

logger = get_logger('fb_fetcher', __file__)


class FBFetcher(object):
    def __init__(self, env):
        self.fb_bot = FBBot()
        self.session = get_session(env)

    @timeit(logger)
    def fetch_new_shops(self, limit=None):
        fb_shops = (self.session.query(FBShop).filter(
            FBShop.name == None).order_by('update_ts').limit(limit).all())
        processed = 0
        for fb_shop in fb_shops:
            processed += self._fetch_info(fb_shop)
        logger.info("Try to fetch %d new fb_shops info, %d fetched" %
Example #13
import json
import allParsers
import helper
logger = helper.get_logger()

def parseBlogs(domain, lock, search_results, filename, search_url):
  logger.info('%s %s', search_results[0].link, filename)
  sr_mapping = {result.link: result for result in search_results}
  results = allParsers.mapper[domain].parse_all(sr_mapping.keys())
  docs = {'docs':[]}
  docs['search'] = search_url

  for url in results:
    search_result = sr_mapping[url]
    try:
      text = [content.text for content in results[url].content]
    except Exception as e:
      logger.error("Article Parsing Failed URL: %s %s", url, e)
      text = ['Failure to Parse']

    article = '\n'.join(text)

    doc = {}
    doc['id'] = hash(url)
    doc['url'] = url
    doc['title'] = search_result.title
    doc['date'] = search_result.date
    doc['article'] = article
    doc['comment_url'] = results[url].comment_url
    comments = []
    for comment in results[url].comments:
Example #14
                    help='if testing on cpu')
opt = parser.parse_args()
print(opt)

cuda = is_cuda(opt.use_cpu)


# Create z noise for generator input
def create_noise(batch_size, latent_dim):
    return Variable(
        Tensor(batch_size, latent_dim).normal_().view(-1, latent_dim, 1, 1))


# Logging
if opt.logging:
    d_real_loss_logger = helper.get_logger(opt.log_port, 'd_loss_real')
    d_fake_loss_logger = helper.get_logger(opt.log_port, 'd_loss_fake')
    d_total_loss_logger = helper.get_logger(opt.log_port, 'd_loss_total')
    g_loss_logger = helper.get_logger(opt.log_port, 'g_loss')
    viz_image_logger = Visdom(port=opt.log_port, env="images")

# Loss function
criteria = torch.nn.BCELoss()

# Initialize generator and discriminator
generator = Generator(opt.batch_size, opt.latent_dim, opt.channels)
discriminator = Discriminator(opt.batch_size, opt.channels)

if cuda:
    generator.cuda()
    discriminator.cuda()
Example #15
# -*- coding=UTF-8 -*-
import os
import json
import argparse

from sqlalchemy import exc
from helper import get_logger, base_path, get_session
from models import Track

__author__ = "Wen-Hao Lee"
__email__ = "*****@*****.**"
__copyright__ = "Copyright 2014, Numnum"

logger = get_logger('geo_walker', __file__)


class CityWalker(object):
    rad = 500  # m

    def __init__(self, area, env):
        assert area in ['small', 'medium', 'large']
        area_turns = {'small': 10, 'medium': 50, 'large': 120}
        self.total_turns = area_turns[area]
        self.session = get_session(env)

    def load(self, file_name):
        file_fullpath = os.path.join(base_path, 'data', file_name)
        city_data = json.load(open(file_fullpath))
        self.cities = city_data['cities']

    def walk_all_cities(self):
Example #16
import os
from pathlib import Path
import shutil
from string import Template
import uuid
import copy

import xml.etree.ElementTree as Et
from geojson import FeatureCollection, Feature
import rasterio

from helper import (load_params, load_metadata,
                    ensure_data_directories_exist, save_metadata, get_logger,
                    read_write_bigtiff, SENTINEL1_L1C_GRD, SNAP_POLARIMETRIC)
from capabilities import set_capability

LOGGER = get_logger(__name__)
PARAMS_FILE = os.environ.get("PARAMS_FILE")
GPT_CMD = "{gpt_path} {graph_xml_path} -e {source_file}"


# pylint: disable=unnecessary-pass
class WrongPolarizationError(ValueError):
    """
    This class passes to the next input file, if the current input file
    does not include the polarization.
    """
    pass


class SNAPPolarimetry:
    """
Example #17
# -*- coding=UTF-8 -*-
import argparse

from sqlalchemy import exc
from helper import get_logger, get_session, timeit
from models import FBShop, GGShop
from gg_api import GGBot

__author__ = "Wen-Hao Lee"
__email__ = "*****@*****.**"
__copyright__ = "Copyright 2014, Numnum"

logger = get_logger('gg_matcher', __file__)


class GGMatcher(object):
    def __init__(self, env, match_type='restaurant'):
        self.gg_bot = GGBot()
        self.session = get_session(env)
        self.match_type = match_type

    @timeit(logger)
    def match_fb_shops(self, limit=None):
        fb_shops = (
            self.session.query(FBShop)
            .filter(FBShop.name != None)
            .filter(FBShop.ggid == None)
            .order_by('update_ts').limit(limit).all())
        processed = 0
        for fb_shop in fb_shops:
            processed += self._find_gg_shop(fb_shop)
Example #18
                        '--debug',
                        help='enable debug output',
                        action='store_true')
    parser.add_argument('-bd',
                        '--backend_debug',
                        help='enable debug output for the backend',
                        action='store_true')
    parser.add_argument('-mf',
                        '--max_failures',
                        help='maximum amount of failures to allow',
                        type=int)
    args = parser.parse_args()

    # init logger
    debug = args.debug
    logger = get_logger("Config2Spec", 'DEBUG' if debug else 'INFO')

    # name of the scenario
    scenario = os.path.basename(args.scenario_path)

    # all the necessary paths where config, fib and topology files are being stored
    batfish_path = args.batfish_path  # path to cloned Batfish repo directory
    backend_path = args.backend_path  # path to Batfish executable
    scenario_path = args.scenario_path

    batfish_port = args.port

    # create backend manager
    ms_manager = init_manager(backend_path, batfish_port)

    # general settings
Example #19
            print(traceback.format_exc())
            self.write(
                json.dumps(
                    dict(status='error',
                         message='Session error,please refresh')))

        self.finish()


if __name__ == '__main__':
    options.define("p", default=7777, help="run on the given port", type=int)
    options.parse_command_line()
    config = Configurations()
    port = options.options.p

    logger = get_logger("server", logging.DEBUG)

    loop = ZMQIOLoop()
    loop.install()
    context = zmq.Context()
    zmq_publish = context.socket(zmq.PUB)
    zmq_publish.bind("tcp://127.0.0.1:%s" %
                     str(config.get_configuration("zmqPublish")))
    zmq_dispatch = context.socket(zmq.REP)
    zmq_dispatch.bind("tcp://127.0.0.1:%s" %
                      str(config.get_configuration("zmqDispatch")))
    zmq_result = context.socket(zmq.PULL)
    zmq_result.bind("tcp://127.0.0.1:%s" %
                    str(config.get_configuration("zmqResult")))
    receiver = ZMQStream(zmq_result)
    receiver.on_recv(on_worker_data_in)
Example #20
import os
import sys
import time
import subprocess
import urllib2
from datetime import datetime
import argparse
import logging
import socket
import math
import helper


time_diff = helper.time_diff
logger = helper.get_logger('exp_checkpoint')


class WorkerCommander:
    def __init__(self, srv_ip, srv_port):
        self.srv_ip = srv_ip
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.s.connect((srv_ip, srv_port))

    def send_line(self, text):
        self.s.send(text+'\n')

    def recv_line(self):
        return self.s.recv(1500)

    def invoke_cmd(self, cmd):
Example #21
import os
import requests
import time
import ConfigParser
import logging
import logging.config

from requests.exceptions import RequestException
from helper import get_logger, base_path

config = ConfigParser.ConfigParser()
config.read(os.path.join(base_path, 'config.ini'))
logger = get_logger('fb_api', __file__)

__author__ = "Wen-Hao Lee"
__email__ = "*****@*****.**"
__copyright__ = "Copyright 2014, Numnum"


class FBBot(object):
    graph_url = "https://graph.facebook.com"
    cooldown = 120  # sec
    search_radius = 500  # m

    def search_restaurant(self, lat, lon):
        restaurants = self._search_place('restaurant', lat, lon)
        steakhouses = self._search_place('steakhouse', lat, lon)
        bars = self._search_place('bar', lat, lon)
        return restaurants + steakhouses + bars

    def _search_place(self, query, lat, lon):
Example #22
parser.add_argument('--hub_ip', default='52.3.220.217')
parser.add_argument('--worker_ip', default='52.91.124.236')
parser.add_argument('--worker_port', default=1234, type=int)
# curl should support c-ares, which is needed for the --dns-servers option
parser.add_argument('--curl_bin_path', default='/Users/liang/.local/curl/bin/curl')
parser.add_argument('--curl_write_out', default='@/elasticity/scripts/curl-format-compact', help='control curl to display info. See curl --write-out')
parser.add_argument('--curl_output', default='curl.out')
parser.add_argument('-c', '--count', default=10, type=int, help='number of times to run the test')
parser.add_argument('--pico_id', default=-1, type=int)
parser.add_argument('--pico_state', default='hot', help='pick expected state of the pico from cold and hot')
parser.add_argument('--wait_time', default=1, type=int, help='sleep $wait_time seconds in each runs of non-hot pico experiments')
parser.add_argument('--set_mail_debug', default=False, action="store_true", help='use this flag to debug send mail')
parser.add_argument('--full_restore', default=False, action="store_true", help='download all pages, restore at once')
args = None #parser.parse_args()

logger = helper.get_logger()


def make_pico_uri(pico_id):
    fmt = '{0:05}.pico'
    return fmt.format(pico_id)


def ping(address):
    count = 5
    ret = subprocess.check_output(['ping', '-c', str(count), address])
    lines = ret.splitlines()
    for i in range(1, 1+count):
        ping_line = 'PING: {0}'.format(lines[i])
        logger.info(ping_line)
    ping_stat_1 = 'PING-STAT-1: '+lines[-2]
Example #23
# As of 2021 Feb 7, most of DMS is not supported as a Type for ASFF Resources
# https://docs.aws.amazon.com/securityhub/latest/userguide/securityhub-findings-format-attributes.html#asff-resources

WIKI = 'https://wiki.bnc.ca/x/g6SHM'

import json
import boto3
from helper import Evaluation, get_logger, adjust_tag_value

# The DMS Endpoint source engine does not have ssl_mode options/requirement
ENDPOINTS_SSLMODE_ENGINE_EXCEPTION = [
    'Amazon S3', 'Amazon DynamoDB', 'Amazon Kinesis', 'Amazon Neptune',
    'Amazon Redshift', 'Elasticsearch Service', 'Kafka'
]

log = get_logger()


def resource_types():
    return (
        'AWS::DMS::Endpoint',
        'AWS::DMS::ReplicationTask',
        'AWS::DMS::ReplicationInstance',
    )


def evaluate_compliance_scheduled(event, credentials):
    dms_client = boto3.client(
        'dms',
        aws_access_key_id=credentials['AccessKeyId'],
        aws_secret_access_key=credentials['SecretAccessKey'],
Example #24
# -*- coding=UTF-8 -*-
import argparse

from sqlalchemy import exc, or_, func
from helper import get_logger, get_session, timeit
from models import Track, FBShop
from fb_api import FBBot

__author__ = "Wen-Hao Lee"
__email__ = "*****@*****.**"
__copyright__ = "Copyright 2014, Numnum"

logger = get_logger('fb_searcher', __file__)


class FBSearcher(object):
    def __init__(self, env):
        self.fb_bot = FBBot()
        self.session = get_session(env)

    @timeit(logger)
    def search_locations(self, limit=None):
        locs = (
            self.session.query(Track)
            .order_by('update_ts').limit(limit).all())
        updated = 0
        for loc in locs:
            updated += self.search_location(loc)
        return updated

    def search_location(self, loc):

Example #25
import qiskit as q
from typing import List, Tuple
from qiskit.circuit.quantumcircuit import QuantumCircuit, QuantumRegister

from helper import grover
from helper import get_logger
from helper import measure_all
from helper import encode_graph
from helper import encode_value
from helper import simulate_circuit
from helper import initialize_circuit
from helper import symbolic_simulation

logging = get_logger(__name__) 

def select_vertex(circuit : QuantumCircuit, registers : QuantumRegister, value : int, target : QuantumRegister, ancillas : QuantumRegister) -> QuantumCircuit:

    circuit.barrier()
    circuit = encode_value(circuit, registers[0], value, reverse=True)

    controlled = [q for register in registers for q in register]
    circuit.cnx(controlled, target, ancillas)

    circuit = encode_value(circuit, registers[0], value, reverse=True)

    return circuit

def find_edge(graph : List[Tuple[int, int, float]], current_mst : List[Tuple[int, int, float]], n_of_qbits_for_vertex : int = 3) -> Tuple[int, int]:
    """Quantum search algorithm to find an edge."""
Example #26
# -*- coding=UTF-8 -*-
import argparse
import json
import re

from sqlalchemy import exc, func
from helper import get_logger, get_session, timeit
from models import FBShop, FBShopType
from fb_api import FBBot

__author__ = "Wen-Hao Lee"
__email__ = "*****@*****.**"
__copyright__ = "Copyright 2014, Numnum"

logger = get_logger('fb_fetcher', __file__)


class FBFetcher(object):
    def __init__(self, env):
        self.fb_bot = FBBot()
        self.session = get_session(env)

    @timeit(logger)
    def fetch_new_shops(self, limit=None):
        fb_shops = (
            self.session.query(FBShop)
            .filter(FBShop.name == None)
            .order_by('update_ts').limit(limit).all())
        processed = 0
        for fb_shop in fb_shops:
Example #27
# -*- coding=UTF-8 -*-
import os
import json
import argparse

from sqlalchemy import exc
from helper import get_logger, base_path, get_session
from models import Track

__author__ = "Wen-Hao Lee"
__email__ = "*****@*****.**"
__copyright__ = "Copyright 2014, Numnum"

logger = get_logger('geo_walker', __file__)


class CityWalker(object):
    rad = 500  # m

    def __init__(self, area, env):
        assert area in ['small', 'medium', 'large']
        area_turns = {
            'small': 10,
            'medium': 50,
            'large': 120}
        self.total_turns = area_turns[area]
        self.session = get_session(env)

    def load(self, file_name):
        file_fullpath = os.path.join(base_path, 'data', file_name)
        city_data = json.load(open(file_fullpath))
Example #28
                    help='blend after completion?')
parser.add_argument('--num_batches',
                    type=int,
                    default=1,
                    help='number of batches to evaluate')
parser.add_argument('--mask_type', type=int, default=1, help='mask type')

opt = parser.parse_args()
print(opt)

cuda = is_cuda(opt.use_cpu)

# Logging
if opt.logging:
    print("Init logging...")
    contextual_loss_logger = helper.get_logger(opt.log_port, 'contextual_loss')
    perceptual_loss_logger = helper.get_logger(opt.log_port, 'perceptual_loss')
    completion_loss_logger = helper.get_logger(opt.log_port, 'completion_loss')
    viz_image_logger = Visdom(port=opt.log_port, env="images")

# Loss function
criteria = torch.nn.BCELoss()

# Load generator and discriminator
generator = Generator(opt.batch_size, opt.latent_dim, opt.channels)
discriminator = Discriminator(opt.batch_size, opt.channels)
for model, model_name in [(generator, 'g'), (discriminator, 'd')]:
    load_model(cuda, model, model_name)

if cuda:
    generator.cuda()
Example #29
def main(args):
    # setup logging
    log = get_logger(args.log)
    log(args)
    timestamp = datetime.now().strftime('%Y%m%d%H%M')
    tb_writer = SummaryWriter("./output/{}/{}/{}/logs/".format(args.model, args.expname, args.dataset)\
                          +timestamp) if args.visual else None

    config = getattr(configs, 'config_' + args.model)()

    # instantiate the dmm
    model = getattr(models, args.model)(config)
    model = model.cuda()
    if args.reload_from >= 0:
        load_model(model, args.reload_from)

    train_set = PolyphonicDataset(args.data_path + 'train.pkl')
    valid_set = PolyphonicDataset(args.data_path + 'valid.pkl')
    test_set = PolyphonicDataset(args.data_path + 'test.pkl')

    #################
    # TRAINING LOOP #
    #################
    times = [time.time()]
    for epoch in range(config['epochs']):

        train_loader = torch.utils.data.DataLoader(
            dataset=train_set,
            batch_size=config['batch_size'],
            shuffle=True,
            num_workers=1)
        train_data_iter = iter(train_loader)
        n_iters = train_data_iter.__len__()

        epoch_nll = 0.0  # accumulator for our estimate of the negative log likelihood (or rather -elbo) for this epoch
        i_batch = 1
        n_slices = 0
        loss_records = {}
        while True:
            try:
                x, x_rev, x_lens = next(train_data_iter)
            except StopIteration:
                break  # end of epoch
            x, x_rev, x_lens = gVar(x), gVar(x_rev), gVar(x_lens)

            if config['anneal_epochs'] > 0 and epoch < config[
                    'anneal_epochs']:  # compute the KL annealing factor
                min_af = config['min_anneal']
                kl_anneal = min_af + (
                    1.0 - min_af) * (float(i_batch + epoch * n_iters + 1) /
                                     float(config['anneal_epochs'] * n_iters))
            else:
                kl_anneal = 1.0  # by default the KL annealing factor is unity

            loss_AE = model.train_AE(x, x_rev, x_lens, kl_anneal)

            epoch_nll += loss_AE['train_loss_AE']
            i_batch = i_batch + 1
            n_slices = n_slices + x_lens.sum().item()

        loss_records.update(loss_AE)
        loss_records.update({'epo_nll': epoch_nll / n_slices})
        times.append(time.time())
        epoch_time = times[-1] - times[-2]
        log("[Epoch %04d]\t\t(dt = %.3f sec)" % (epoch, epoch_time))
        log(loss_records)
        if args.visual:
            for k, v in loss_records.items():
                tb_writer.add_scalar(k, v, epoch)
        # do evaluation on test and validation data and report results
        if (epoch + 1) % args.test_freq == 0:
            save_model(model, epoch)
            test_loader = torch.utils.data.DataLoader(
                dataset=test_set,
                batch_size=config['batch_size'],
                shuffle=False,
                num_workers=1)
            for x, x_rev, x_lens in test_loader:
                x, x_rev, x_lens = gVar(x), gVar(x_rev), gVar(x_lens)
                test_nll = model.valid(x, x_rev, x_lens) / x_lens.sum()
            log("[val/test epoch %08d]  %.8f" % (epoch, test_nll))
Example #30
# -*- coding=UTF-8 -*-
import argparse

from sqlalchemy import exc
from helper import get_logger, get_session, timeit
from models import FBShop, GGShop
from gg_api import GGBot

__author__ = "Wen-Hao Lee"
__email__ = "*****@*****.**"
__copyright__ = "Copyright 2014, Numnum"

logger = get_logger('gg_matcher', __file__)


class GGMatcher(object):
    def __init__(self, env, match_type='restaurant'):
        self.gg_bot = GGBot()
        self.session = get_session(env)
        self.match_type = match_type

    @timeit(logger)
    def match_fb_shops(self, limit=None):
        fb_shops = (self.session.query(FBShop).filter(
            FBShop.name != None).filter(
                FBShop.ggid == None).order_by('update_ts').limit(limit).all())
        processed = 0
        for fb_shop in fb_shops:
            processed += self._find_gg_shop(fb_shop)
        logger.info("Try to match %d gg_shops for fb_shops, %d matched" %
                    (len(fb_shops), processed))
Example #31
# -*- coding=UTF-8 -*-
import argparse
import re

from sqlalchemy import exc, func
from helper import get_logger, get_session, timeit
from models import GGShop
from gg_api import GGBot

__author__ = "Wen-Hao Lee"
__email__ = "*****@*****.**"
__copyright__ = "Copyright 2014, Numnum"

logger = get_logger('gg_fetcher', __file__)


class GGFetcher(object):

    def __init__(self, env):
        self.gg_bot = GGBot()
        self.session = get_session(env)

    @timeit(logger)
    def fetch_new_shops(self, limit=None):
        gg_shops = (
            self.session.query(GGShop)
            .filter(GGShop.name == None)
            .order_by('update_ts').limit(limit).all())
        processed = 0
        for gg_shop in gg_shops:
            processed += self._fetch_info(gg_shop)
Example #32
import boto3
from helper import ConfigItemEvaluation, get_logger, adjust_tag_value, InapplicableConfigItemEvaluation

WIKI = 'https://wiki.bnc.ca/x/dwQkNQ'

# Testing for wide Egress HTTPS is too flaky
# Tightly coupled with https://git.bnc.ca/projects/CLOUDSEC/repos/aws-security-group/browse/known_cidr_blocks.yaml?at=refs%2Fheads%2Fdevelop#740
# from netaddr import *
# IPSet(['0.0.0.0/0']) - IPSet(['10.0.0.0/8', '172.16.0.0/12', '192.168.0.0/16'])
# VALID_INTERNET_CIDR = ('0.0.0.0/5', '8.0.0.0/7', '11.0.0.0/8', '12.0.0.0/6', '16.0.0.0/4', '32.0.0.0/3', '64.0.0.0/2', '128.0.0.0/3', '160.0.0.0/5', '168.0.0.0/6', '172.0.0.0/12', '172.32.0.0/11', '172.64.0.0/10', '172.128.0.0/9', '173.0.0.0/8', '174.0.0.0/7', '176.0.0.0/4', '192.0.0.0/9', '192.128.0.0/11', '192.160.0.0/13', '192.169.0.0/16', '192.170.0.0/15', '192.172.0.0/14', '192.176.0.0/12', '192.192.0.0/10', '193.0.0.0/8', '194.0.0.0/7', '196.0.0.0/6', '200.0.0.0/5', '208.0.0.0/4', '224.0.0.0/3')


# TODO The following check is not implemented yet:
# - Has to be attached to only one type of resource

LOG = get_logger()

def evaluate_compliance_item_Change(event, configuration_item):
    """TODO This control is not enabled yet"""
    return [InapplicableConfigItemEvaluation(configuration_item, event)]
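    # NOTE: the early return above makes the remaining body unreachable until
    # this control is enabled.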
    securityhub_resource_details = {
        'GroupName': configuration_item['configuration']['groupName'],
        'GroupId': configuration_item['configuration']['groupId'],
        'OwnerId': configuration_item['configuration']['ownerId'],
        'VpcId': configuration_item['configuration']['vpcId'],
        # TODO IpPermissions keys start with uppercase
        # https://docs.aws.amazon.com/securityhub/latest/userguide/securityhub-findings-format-attributes.html#asff-resourcedetails-awsec2securitygroup
        # 'IpPermissions': configuration_item['configuration']['ipPermissions'],
        # 'IpPermissionsEgress': configuration_item['configuration']['ipPermissionsEgress']
    }
    eval = ConfigItemEvaluation(__name__, configuration_item, WIKI, event, 'AwsEc2SecurityGroup', securityhub_resource_details)
Example #33
import pandas as pd
import argparse
import pickle

from gensim.summarization import bm25

# Custom functions
import sys
sys.path.append('./src')
import helper as he
import prepare as pr
import custom as cu
import data as dt

logger = he.get_logger(location=__name__)

rank_type_lookup = {
    'historical': 0,  #NOTE: currently only 0 is supported
    'textblocks': 1,
    'historical_thread': 2
}


class Rank():
    def __init__(self, task, rank_type='historical', inference=False):
        self.dt_rank = dt.Data(task=task, inference=inference)
        # Load bm25
        with open(self.dt_rank.get_path('fn_rank', dir='model_dir'),
                  'rb') as fh:
            self.bm = pickle.load(fh)
            self.data = pickle.load(fh)
Example #34
# -*- coding: utf-8; -*-
import os
import time
import requests
import ConfigParser

from requests.exceptions import RequestException
from helper import get_logger, base_path

__author__ = "Wen-Hao Lee"
__email__ = "*****@*****.**"
__copyright__ = "Copyright 2014, Numnum"

config = ConfigParser.ConfigParser()
config.read(os.path.join(base_path, 'config.ini'))
logger = get_logger('gg_api', __file__)


class GGBot(object):
    map_url = "https://maps.googleapis.com/maps/api/place"
    cooldown = 180  # sec
    search_radius = 1200  # m

    def search_nearby(self, keyword, type_, lat, lon):
        nearby_url = "%s/%s" % (self.map_url, "nearbysearch/json")
        params = {
            'keyword': keyword,
            'location': "%s,%s" % (lat, lon),
            'radius': self.search_radius,
            'language': "zh-TW",
            'type': type_,