Example #1
def main():
    '''
        Main entry.

        Here goes code for testing the methods and classes of this module.
    '''
    logger_name = 'BayesianLogger'
    util.setup_logger(logger_name,
                      os.path.join('logs', 'bayesian_inversion.log'))
    logger = util.get_logger(logger_name)

    logger.info('ID={0}'.format(time.time()))

    word2vec_model = make_word2vec(iterations=1,
                                   size=100,
                                   skipgram=1,
                                   alpha=0.025,
                                   workers=4,
                                   logger_name=logger_name)
    dataset = util.read_dataset_threaded(os.path.join('data', 'raw_texts.txt'),
                                         processes=10,
                                         logger_name=logger_name)

    evaluate(dataset,
             word2vec_model,
             priors=True,
             k_folds=6,
             logger_name=logger_name)
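
Nearly every example on this page calls a setup_logger helper from a project-specific util module without showing its definition. As a rough sketch of what such a helper commonly looks like (the signature and behavior here are assumptions, not the actual util module used by any of the snippets):

import logging
import os


def setup_logger(logger_name, log_file, level=logging.INFO):
    """Attach a file handler with a basic format to a named logger."""
    log_dir = os.path.dirname(log_file)
    if log_dir:
        os.makedirs(log_dir, exist_ok=True)
    logger = logging.getLogger(logger_name)
    logger.setLevel(level)
    handler = logging.FileHandler(log_file)
    handler.setFormatter(
        logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s'))
    logger.addHandler(handler)
    return logger


# Usage mirroring Example #1 (hypothetical; the real helper may differ):
logger = setup_logger('BayesianLogger', os.path.join('logs', 'bayesian_inversion.log'))
logger.info('logger configured')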
Example #2
def main():
    opts = parse_cli()
    setup_logger(stdout=not opts.quiet, network=opts.network_log)

    target_ = 'mainnet'
    if opts.testnet:
        target_ = 'testnet'

    server_ = opts.server or TARGET_SERVER[target_]
    ergo_update(server_, opts.api_key, opts.value)
Example #3
async def main():
    """ Main function """
    env = Env()
    env.read_env()
    setup_logger(env.int("LOG_LEVEL", logging.INFO),
                 env.path("LOG_FILE", None))

    async with aiohttp.ClientSession() as session:
        bot = create_bot(env, session)

        LOGGER.debug("Starting bot")
        await bot.start(env.str("BOT_TOKEN"))
Example #4
def setup_app():
    app = Flask(__name__)

    # setting up the app logger with a rotating file handler, in addition to
    #  the built-in flask logger which can be helpful in debug mode.

    additional_log_handlers = [
        RotatingFileHandler(
            config.instance().rest_service_log_path,
            maxBytes=1024*1024*100,
            backupCount=20)
    ]

    app.logger_name = 'manager-rest'
    setup_logger(logger_name=app.logger.name,
                 logger_level=logging.DEBUG,
                 handlers=additional_log_handlers,
                 remove_existing_handlers=False)

    app.before_request(log_request)
    app.after_request(log_response)

    # saving flask's original error handlers
    flask_handle_exception = app.handle_exception
    flask_handle_user_exception = app.handle_user_exception

    api = Api(app)

    # saving flask-restful's error handlers
    flask_restful_handle_exception = app.handle_exception
    flask_restful_handle_user_exception = app.handle_user_exception

    # setting it so that <500 codes use flask-restful's error handlers,
    # while 500+ codes use Flask's original error handlers (for which we
    # register an error handler elsewhere in this module)
    def handle_exception(flask_method, flask_restful_method, e):
        code = getattr(e, 'code', 500)
        if code >= 500:
            return flask_method(e)
        else:
            return flask_restful_method(e)

    app.handle_exception = functools.partial(
        handle_exception,
        flask_handle_exception,
        flask_restful_handle_exception)
    app.handle_user_exception = functools.partial(
        handle_exception,
        flask_handle_user_exception,
        flask_restful_handle_user_exception)

    resources.setup_resources(api)
    return app
Example #5
def main():
    opts = parse_cli()
    setup_logger(stdout=not opts.quiet, network=opts.network_log)

    target_ = 'mainnet'
    if opts.testnet:
        target_ = 'testnet'

    server_ = opts.server or TARGET_SERVER[target_]
    api = ErgoClient(server_, opts.api_key)

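    # box.id is expected to hold one or two box IDs: while the first is still
    # unspent, opts.stop makes the script drop the second; once the first is
    # spent and the second has enough confirmations, the second replaces the first.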
    with open('box.id') as inp:
        lines = inp.read().splitlines()

    if len(lines) == 1:
        logging.debug('One box ID in box.id')
        pass
    elif len(lines) != 2:
        logging.error('File box.id contains %d lines (expected 1 or 2)' % (
            len(lines)
        ))
        sys.exit(1)
    else:
        logging.debug('Two box IDs in box.id')
        id1, id2 = lines
        if not is_box_spent(api, id1):
            logging.debug('First box ID is not spent')
            if opts.stop:
                with open('box.id', 'w') as out:
                    out.write('%s\n' % id1)  # remove 2nd line
                logging.debug('Removed second box ID')
        else:
            logging.debug('! First box ID is spent')
            if not is_box_spent(api, id2):
                logging.debug('Second box ID is not spent')
                res = api.request(
                    '/wallet/boxes/unspent'
                    '?minConfirmations=30&minInclusionHeight=0'
                )
                found = any(x['box']['boxId'] == id2 for x in res)
                if found:
                    with open('box.id', 'w') as out:
                        out.write('%s\n' % id2)  # remove 1st line
                    logging.debug('Removed first box ID')
                else:
                    logging.error('Not enough confirmations yet')
                    sys.exit(1)
            else:
                logging.error(
                    'Both 1st and 2nd box IDs are spent'
                )
                sys.exit(1)
Example #6
def main():
    opts = parse_cli()
    setup_logger(stdout=not opts.quiet, network=opts.network_log)

    target_ = 'mainnet'
    if opts.testnet:
        target_ = 'testnet'

    server_ = opts.server or TARGET_SERVER[target_]

    price = get_currency(opts.quandl_key) / OZT
    logging.info('AUG/USD price: %s' % price)

    ergo_update(server_, opts.api_key, int(price*1e9))
Example #7
def main():
    opts = parse_cli()
    setup_logger(stdout=not opts.quiet, network=opts.network_log)

    target_ = 'mainnet'
    if opts.testnet:
        target_ = 'testnet'

    server_ = opts.server or TARGET_SERVER[target_]

    cmc_api = CmcApi(opts.cmc_key)
    price = cmc_api.get_ergo_price('EUR')
    logging.info('EUR/ERG price: %s' % price)

    ergo_update(server_, opts.api_key, int(price * 1e7))
Example #8
 def generate(self):
     logger = util.setup_logger(INFO)
     env = self.env.lower() if self.env else 'production'
     conf = util.getstanza('getsnow', env)
     # Proxy not currently used in this version
     # proxy_conf = util.getstanza('getsnow', 'global')
     # proxies = util.setproxy(conf, proxy_conf)
     username = conf['user']
     password = conf['password']
     url = conf['url']
     value_replacements = conf['value_replacements']
     user_name = self.user_name.split(',')
     daysAgo = int(self.daysAgo) if self.daysAgo else 30
     snowuser = snow(url, username, password)
     snowuser.replacementsdict(value_replacements)
     user_info = snowuser.getsysid('sys_user', 'user_name', user_name, mapto='user_name')
     for record in user_info[1]:
         record = snowuser.updaterecord(record, sourcetype='snow:user')
         record['_raw'] = util.tojson(record)
         yield record
     exuded = snowuser.filterbuilder('assigned_to', user_info[0])
     url = snowuser.reqencode([exuded], table='alm_asset')
     for record in snowuser.getrecords(url):
         record = snowuser.updaterecord(record, sourcetype='snow:asset')
         record['_raw'] = util.tojson(record)
         yield record
     exuded = snowuser.filterbuilder('opened_by', user_info[0])
     url = snowuser.reqencode([exuded], days=daysAgo)
     for record in snowuser.getrecords(url):
         record = snowuser.updaterecord(record, sourcetype='snow:incident')
         record['_raw'] = util.tojson(record)
         yield record
Example #9
def main():
    opts = parse_cli()
    setup_logger(stdout=not opts.quiet, network=opts.network_log)

    target_ = 'mainnet'
    if opts.testnet:
        target_ = 'testnet'

    server_ = opts.server or TARGET_SERVER[target_]
    api = ErgoClient(server_, opts.api_key)

    # Get the first wallet address
    res = api.request(
        '/wallet/addresses',
    )
    addr = res[0]

    # Generate transaction
    tx_data = {
        'requests': [{
            'address': addr,
            'amount': 1,
            'name': opts.name,
            'description': opts.description,
            'decimals': 0,
        }],
        'fee': 1e6,
        'inputsRaw': [],
    }
    res = api.request(
        '/wallet/transaction/generate',
        data=tx_data,
    )

    out = res['outputs'][0]
    with open('address.id', 'w') as fobj:
        fobj.write(addr + '\n')
    with open('box.id', 'w') as fobj:
        fobj.write(out['boxId'] + '\n')
    with open('token.id', 'w') as fobj:
        fobj.write(out['assets'][0]['tokenId'] + '\n')

    api.request(
        '/wallet/transaction/send',
        data=tx_data,
    )
Example #10
def main():
    '''
        Main entry.

        Here goes code for testing the methods and classes of this module.
    '''
    logger_name = 'DistributedRepresentationLogger'
    util.setup_logger(logger_name,
                      os.path.join('logs', 'distributed_representation.log'))
    logger = util.get_logger(logger_name)

    logger.info('ID={0}'.format(time.time()))

    dataset = util.read_dataset_threaded(os.path.join('data', 'raw_texts.txt'),
                                         processes=8,
                                         logger_name=logger_name)

    doc2vec_base_model_dbow = make_doc2vec(size=400,
                                           window=5,
                                           dm=0,
                                           hierarchical_softmax=0,
                                           negative=10,
                                           dm_mean=0,
                                           dm_concat=0,
                                           dbow_words=1,
                                           workers=8,
                                           min_count=2,
                                           sample=0,
                                           logger_name=logger_name)

    # doc2vec_base_model_dm = make_doc2vec(size=400, window=5, dm=1, hierarchical_softmax=1,
    #     negative=0, dm_mean=0, dm_concat=0, dbow_words=0, workers=8, min_count=2, sample=0,
    #     logger_name=logger_name)

    # doc2vec_base_model = wrappers.ConcatDoc2vec([doc2vec_base_model_dbow, doc2vec_base_model_dm])

    # model = linear_model.LogisticRegression(solver='lbfgs')
    model = svm.SVC(kernel='linear')

    evaluate(dataset,
             model,
             doc2vec_base_model_dbow,
             k_folds=6,
             iterations=1,
             alpha=0.025,
             logger_name=logger_name)
Example #11
def main(
    conf: str,
    log: str,
):
    # Load the configuration
    conf = load_config(conf)
    # File name to save the log to
    log_file = log
    # Initialize the logger
    logger = setup_logger(log_file)
    logger.debug("test")
Example #12
def main():
    opts = parse_cli()
    setup_logger(stdout=not opts.quiet, network=opts.network_log)

    target_ = 'mainnet'
    if opts.testnet:
        target_ = 'testnet'

    server_ = opts.server or TARGET_SERVER[target_]

    price1 = get_currency(opts.quandl_key) / OZT
    logging.info('AUG/USD price: %s' % price1)

    cmc_api = CmcApi(opts.cmc_key)
    price2 = cmc_api.get_ergo_price('USD')
    logging.info('USD/ERG price: %s' % price2)

    price = price1 / price2

    logging.info('AUG/ERG price: %s' % price)

    ergo_update(server_, opts.api_key, int(price * 1e9))
Example #13
def main():
    '''
        Main entry.

        Here goes code for testing the methods and classes of this module.
    '''
    logger_name = 'NaiveLogger'
    util.setup_logger(logger_name, os.path.join('logs', 'naive.log'))
    logger = util.get_logger(logger_name)

    logger.info('ID={0}'.format(time.time()))

    dataset = util.read_dataset_threaded(os.path.join('data', 'raw_texts.txt'),
                                         processes=8,
                                         logger_name=logger_name)
    word2vec_base_model = make_word2vec(iterations=10,
                                        size=900,
                                        skipgram=1,
                                        hierarchical_softmax=0,
                                        negative=14,
                                        alpha=0.025,
                                        sample=1e-5,
                                        workers=8,
                                        logger_name=logger_name)

    # model = linear_model.SGDClassifier(loss='modified_huber')

    # model = linear_model.LogisticRegression(solver='lbfgs', n_jobs=1)

    # model = neighbors.NearestCentroid()

    model = svm.SVC(kernel='linear')

    evaluate(dataset,
             model,
             word2vec_base_model,
             k_folds=6,
             logger_name=logger_name)
Example #14
def run(preprocessed_dir, invalid_thresh, invalid_user_thresh, relative_diff_thresh, data_split,
        no_interactions, negative, max_snps, model_id, cross_validation, output_dir):
    """
    Builds a model to predict phenotype
    :param preprocessed_dir: The directory containing the preprocessed data
    :param invalid_thresh: The acceptable percentage of missing data before a SNP is discarded
    :param invalid_user_thresh: The acceptable percentage of missing data before a user is discarded
    :param relative_diff_thresh: The relative difference in mutation percent, calculated as a percent of the
                                larger mutation percent value.
    :param data_split: The percent data used for testing.
    :param no_interactions: If True the model will not contain interactions
    :param negative: The negative phenotype label
    :param model_id: The id for the model to use
    :param cross_validation: number of folds for cross validation
    :param output_dir: The directory to write the model in
    """
    # Expand file paths
    preprocessed_dir = expand_path(preprocessed_dir)

    # Make sure output directory exists before doing work
    clean_output(output_dir)

    setup_logger(output_dir, model_id + "_model")

    # Get model
    build_model = MODELS.get(model_id)
    if not build_model:
        raise ValueError('Model Id "{}" is not valid'.format(model_id))

    phenotypes = timed_invoke('reading the preprocessed files', lambda: __read_phenotype_input(preprocessed_dir))

    data_set = timed_invoke('creating model data set', lambda: mutation_difference.create_dataset(
                               phenotypes, invalid_thresh, invalid_user_thresh, relative_diff_thresh)
                            )
    timed_invoke('building model', lambda: build_model(data_set, data_split, no_interactions, negative, max_snps,
                                                       cross_validation, output_dir))
    logger.info('Output written to "{}"'.format(output_dir))
Example #15
 def generate(self):
     logger = util.setup_logger(INFO)
     env = self.env.lower() if self.env else 'production'
     conf = util.getstanza('getsnow', env)
     #proxy_conf = util.getstanza('getsnow', 'global')
     username = conf['user']
     password = conf['password']
     active = self.active
     user_name = self.user_name.split(',') if self.user_name else []
     assigment_group = self.assignment_group.split(',') if self.assignment_group else []
     daysAgo = int(self.daysAgo) if self.daysAgo else None
     limit = self.limit
     daysBy = self.daysBy if self.daysBy else 'opened_at'
     filterBy = self.filterBy if self.filterBy else 'assigned_to'
     url = conf['url']
     value_replacements = conf['value_replacements']
     if active:
         try:
             active = active.strip()
             active = active[0].upper() + active[1:].lower()
             active = ast.literal_eval(active)
         except:
             active = True
     if limit:
         try:
             limit = int(limit)
         except:
             limit = 10000
     snowincident = snow(url, username, password)
     snowincident.replacementsdict(value_replacements)
     user_info = snowincident.getsysid('sys_user', 'user_name', user_name, mapto='user_name')[0]
     group_info = snowincident.getsysid('sys_user_group', 'name', assigment_group, mapto='name')[0]
     exuded1 = snowincident.filterbuilder(filterBy, user_info)
     exuded2 = snowincident.filterbuilder('assignment_group', group_info)
     url = snowincident.reqencode([exuded1, exuded2], table='incident', active=active, timeby=daysBy, days=daysAgo)
     for record in snowincident.getrecords(url, limit=limit):
         record = snowincident.updaterecord(record, sourcetype='snow:incident')
         record['_raw'] = util.tojson(record)
         yield record
Example #16
    def __init__(self, q1, q2):
        self.q1 = q1
        self.q2 = q2
        self.save_dir = "/home/newuhe/Data9/"
        self.test_dir = "/home/newuhe/Data9/Test/"
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

        # self.rtmon = URRTMonitor("192.168.1.147")  # some information is only available on the rt interface
        # self.rtmon.start()
        # self.rtmon.set_csys(None)
        self.secmon = SecondaryMonitor("192.168.1.147")
        self.log = {}

        setup_logger('worker_log', r'worker_log')
        self.log['worker_log'] = logging.getLogger('worker_log')

        self.test_log = {}
        setup_logger('test_log', r'test_log')
        self.test_log['test_log'] = logging.getLogger('test_log')

        self.force = 18
        self.d = -0.3
        self.v = 0.0003

        ## usb
        #self.primitive = [-0.61306625, -0.05149676 , 0.0065 ,-2.8868024,   1.23261833,0.00922825]

        ## round
        # self.primitive = [-0.30288  ,0.38096 , 0.019 , 2.3236 , -2.1558 , -0.0266]
        #(up pose)self.primitive = [-0.3026  ,0.37825 , 0.01041 , 2.3236 , -2.1558 , -0.0266]
        #(new round)self.primitive = [-4.96312008e-01 , 5.53456907e-01  ,1.12701285e-03, -1.16724081e+00, .87952391e+00 , 1.91615209e-02]
        self.primitive = [
            -0.50454686, 0.56308032, 0.00392812, 1.19445411, -2.90431322,
            0.05488621
        ]

        self.half_count = 1
        self.rcount = 3

        self.count = 0
        self.pcount = 0

        ## for orientation
        self.sequence = []
        self.rinit = -0.21
        self.interval = 0.07

        ## for translation
        self.xcount = 3
        self.xhalf = (self.xcount - 1) / 2
        self.ycount = 3
        self.yhalf = (self.ycount - 1) / 2

        ## round
        self.z = self.primitive[2]
        self.yinterval = 0.006 / self.xhalf
        self.xinterval = 0.006 / self.yhalf

        ## usb
        # self.z = 0.0065
        # self.yinterval = 0.003 / self.yhalf
        # self.xinterval = 0.006 / self.xhalf

        # for i in range(9):
        #     self.xsequence.append(x)
        #     self.ysequence.append(y)
        #     x += self.xinterval
        #     y += self.yinterval
        # self.ysequence_reverse = list(reversed(self.ysequence))
        # self.sequence_reverse = list(reversed(self.sequence))

        # print(self.xsequence)
        # print(self.ysequence)
        # print(self.sequence)
        #self.log['worker_log'].info('x- = %0.5f, x+ = %0.5f, y- = %0.5f, y+ = %0.5f, r- = %0.5f, r+ = %0.5f' % (self.xsequence[0],self.xsequence[8],self.ysequence[0],self.ysequence[8],self.sequence[0],self.sequence[8]))

        self.connect()

        ## reset to ground truth      TCP:(0,0,0.225,0,0,0)
        self.movel(self.primitive, t=3)
        #self.movej([-1.20141346,-1.24530489,1.70199537,-2.01912338,-1.5878895,-0.38679821],t=3)

        ## take reference picture
        time.sleep(0.5)
        self.move_down(v=self.v)
        fz = 0
        z = 0.1
        # (up pose)while( abs(fz) < self.force and z>0.001):
        #(new round)while( abs(fz) < self.force and z>-0.005):
        while (abs(fz) < self.force and z > -0.004):
            pose = self.secmon.get_cartesian_info(False)
            z = pose["Z"]
            f.getForce()
            fx, fy, fz, px, py, pz = f.parseData()
        self.stop()

        ## take picture
        self.q1.put(1)
        pic, depth = self.q2.get(True)
        cv2.imwrite("/home/newuhe/Data9/" + 'reference.jpg', pic)
        cv2.imwrite("/home/newuhe/Data9/Test/" + 'reference.jpg', pic)

        ## pose init
        self.yi = int(self.count / self.ycount)
        self.xi = self.count - self.ycount * self.yi
        self.rxi = int(self.pcount / self.rcount)
        self.ryi = self.pcount - (self.half_count * 2 + 1) * self.rxi
        if self.yi % 2 == 0:
            if self.xi % 2 == 0:
                #orientation1
                if self.rxi % 2 == 0:
                    self.move(self.primitive, [
                        self.xinterval *
                        (self.xhalf - self.xi), self.yinterval *
                        (self.yhalf - self.yi), 0, self.interval *
                        (self.half_count - self.rxi), self.interval *
                        (self.half_count - self.ryi), 0
                    ])
                else:
                    self.move(self.primitive, [
                        self.xinterval *
                        (self.xhalf - self.xi), self.yinterval *
                        (self.yhalf - self.yi), 0, self.interval *
                        (self.half_count - self.rxi), self.interval *
                        (-self.half_count + self.ryi), 0
                    ])
            else:
                #orientation2
                if self.rxi % 2 == 0:
                    self.move(self.primitive, [
                        self.xinterval *
                        (self.xhalf - self.xi), self.yinterval *
                        (self.yhalf - self.yi), 0, self.interval *
                        (-self.half_count + self.rxi), self.interval *
                        (-self.half_count + self.ryi), 0
                    ])
                else:
                    self.move(self.primitive, [
                        self.xinterval *
                        (self.xhalf - self.xi), self.yinterval *
                        (self.yhalf - self.yi), 0, self.interval *
                        (-self.half_count + self.rxi), self.interval *
                        (self.half_count - self.ryi), 0
                    ])
        else:
            if self.xi % 2 == 0:
                #orientation2
                if self.rxi % 2 == 0:
                    self.move(self.primitive, [
                        self.xinterval *
                        (-self.xhalf + self.xi), self.yinterval *
                        (self.yhalf - self.yi), 0, self.interval *
                        (-self.half_count + self.rxi), self.interval *
                        (-self.half_count + self.ryi), 0
                    ])
                else:
                    self.move(self.primitive, [
                        self.xinterval *
                        (-self.xhalf + self.xi), self.yinterval *
                        (self.yhalf - self.yi), 0, self.interval *
                        (-self.half_count + self.rxi), self.interval *
                        (self.half_count - self.ryi), 0
                    ])
            else:
                #orientation1
                if self.rxi % 2 == 0:
                    self.move(self.primitive, [
                        self.xinterval *
                        (-self.xhalf + self.xi), self.yinterval *
                        (self.yhalf - self.yi), 0, self.interval *
                        (self.half_count - self.rxi), self.interval *
                        (self.half_count - self.ryi), 0
                    ])
                else:
                    self.move(self.primitive, [
                        self.xinterval *
                        (-self.xhalf + self.xi), self.yinterval *
                        (self.yhalf - self.yi), 0, self.interval *
                        (self.half_count - self.rxi), self.interval *
                        (-self.half_count + self.ryi), 0
                    ])
Example #17
burnin_str = ''
if burnin:
    burnin_str = 'burn' + str(burnin).lower()

reg_beta_str = ''
if reg_beta > 0.0:
    reg_beta_str = 'reg' + str(reg_beta) + '_'

additional_features_str = additional_features
if additional_features != '':
    additional_features_str = additional_features + '_'

tensorboard_name = str(args.logs_dir.absolute())

name_experiment = tensorboard_name
logger = util.setup_logger(logs_dir=str(args.logs_dir.absolute()), also_stdout=True)
logger.info('PARAMS :  ' + name_experiment)
logger.info('')
logger.info(args)


if dataset.startswith('PRFX'):
    if dataset == 'PRFX10':
        suffix = 'prefix_10_dataset'
    if dataset == 'PRFX30':
        suffix = 'prefix_30_dataset'
    if dataset == 'PRFX50':
        suffix = 'prefix_50_dataset'
    word_to_id_file_path = os.path.join(root_path, suffix, 'word_to_id')
    id_to_word_file_path = os.path.join(root_path, suffix, 'id_to_word')
    training_data_file_path = os.path.join(root_path, suffix, 'train')
Example #18
__license__ = "ASF"
__version__ = "2.0"
__maintainer__ = "Bernardo Macias"
__email__ = '*****@*****.**'
__status__ = 'Production'

import util
import sys
import ast
from snowpy import snow
from logging import INFO
from splunklib.searchcommands import \
    dispatch, GeneratingCommand, Configuration, Option


logger = util.setup_logger(INFO)

@Configuration(local=True)
class snowNowCommand(GeneratingCommand):
    """ %(synopsis)

    ##Syntax
    .. code-block::
    getsnow filters="<key1>=<value1> <key2>=<value2>" daysAgo=<int> env=<str> table=<str>

    ##Description

    Returns json events for Service Now API from tables.  Limit 1000 events.

    ##Example
Example #19
import os
import logging

import code_generator
import util

CODES_STR = "C6 D8 E3 G9 L3 M8 R3 T3"
CODEWORD_FRAGMENT = "T"
DICTIONARY_DIRECTORY = "dicts/"

if __name__ == "__main__":
    util.setup_logger()
    logger = logging.getLogger()

    codes = util.parse_codecards(CODES_STR)
    codeword_fragment = util.parse_codeword_fragment(CODEWORD_FRAGMENT)
    dict_dir = os.path.abspath(DICTIONARY_DIRECTORY)
    if codes is None:
        logger.error(
            "Could not parse codes, code string was '{}'".format(CODES_STR))
    elif codeword_fragment is None:
        logger.error(
            "Could not parse codeword fragment, fragment was '{}'".format(
                CODEWORD_FRAGMENT))
    else:
        logger.info("Codes used: {}".format(CODES_STR))
        logger.info("Dictionary dir: {}".format(dict_dir))
        results = code_generator.create_codes_by_fragment(
            codes, CODEWORD_FRAGMENT, DICTIONARY_DIRECTORY)
        if len(results) == 0:
            logger.info("No valid codes found!")
Example #20
    # Plot generation
    for classification in classification_types:
        fig = plt.figure(figsize=(15, 30))
        for i, score in enumerate(metrics_dict.keys()):
            ax = fig.add_subplot(len(set(metrics_dict))//2+1, 2, i + 1)
            ax.plot(number_of_classes, metrics_dict[score][classification])
            plt.title("Classification %s for %s" % (score, classification))
            plt.xlabel("Number of classes")
            plt.ylabel(score)
        plt.savefig("../images/LSHTC_" + classification)
        plt.close(fig)

    fig = plt.figure(figsize=(15, 30))
    for i, score in enumerate(metrics_dict.keys()):
        ax = fig.add_subplot(len(set(metrics_dict))//2+1, 2, i + 1)
        for classification in classification_types:
            ax.plot(number_of_classes, metrics_dict[score][classification])
        plt.title("Classification %s" % score)
        plt.legend(classification_types, loc="upper right")
        plt.xlabel("Number of classes")
        plt.ylabel(score)
    plt.savefig("../images/LSHTC_all")
    plt.close(fig)


if __name__ == '__main__':
    force_locale_utf8()
    logger = setup_logger('lshtc')
    main()
Example #21
from bot import BotApp
import util

config = util.load_config('config.json')
logger = util.setup_logger(config['log_file'])
bot = BotApp(oauth_token=config['auth_token'],
             channel=config['join_channel'],
             nick=config['nick'])

boops = 0


@bot.command(name='test')
def test(sender, *args):
    bot.send(f'Hello, @{sender}')


@bot.command(name='addboop')
def add_boop(sender, *args):
    bot.send(f'Incremented boop count')
    global boops
    boops += 1


@bot.command(name='boops')
def print_boops(sender, *args):
    bot.send(f'Total boops: {boops}')


def post_login():
    bot.send('/me has arrived!')
Example #22
import config
import util

from datetime import datetime

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains

from xvfbwrapper import Xvfb

# setup log
wikilog = util.setup_logger("wikilog", "pyout/wikilog.log")


#-----------------------------------------------
# Startup browser (Firefox)
# - returns browser object
#-----------------------------------------------
def start_browser():

    ff_browser = webdriver.Firefox()
    ff_browser.implicitly_wait(10)
    ff_browser.maximize_window()

    return ff_browser

Example #23
                   'INP' + inputs_geom + '_' + \
                   'BIAS' + bias_geom + fix_biases_str + '_' + mat_str +\
                   'FFNN' + ffnn_geom + str(before_mlr_dim) + ffnn_non_lin + '_' +\
                   additional_features_str + \
                   drp_str +\
                   'MLR' + mlr_geom + '_' + \
                   reg_beta_str + \
                   hyp_opt_str + \
                   c_str +\
                   'prje' + str(PROJ_EPS) + '_' + \
                   'bs' + str(batch_size) + '_' +\
                   burnin_str +  '__' + now.strftime("%H:%M:%S,%dM")

name_experiment = tensorboard_name
logger = util.setup_logger(name_experiment,
                           logs_dir=os.path.join(root_path, 'logs/'),
                           also_stdout=True)
logger.info('PARAMS :  ' + name_experiment)
logger.info('')
logger.info(args)

if dataset.startswith('PRFX'):
    if dataset == 'PRFX10':
        suffix = 'prefix_10_dataset'
    if dataset == 'PRFX30':
        suffix = 'prefix_30_dataset'
    if dataset == 'PRFX50':
        suffix = 'prefix_50_dataset'
    word_to_id_file_path = os.path.join(root_path, suffix, 'word_to_id')
    id_to_word_file_path = os.path.join(root_path, suffix, 'id_to_word')
    training_data_file_path = os.path.join(root_path, suffix, 'train')
Example #24
import argparse
import logging
from os import mkdir
from os.path import dirname, join, isdir

from loader import DateBasedOrganizer, get_record_filepaths
from merge import move_files, copy_files
from util import load_config, inject_config_if_missing, setup_logger, check_third_party

# Setup logging
logger = setup_logger(logging, __name__)


# Main function
def run(video_dir, audio_dir, output_dir=None, move=False):
    if output_dir is None:
        output_dir = join(dirname(__file__), 'results')
        if not isdir(output_dir):
            mkdir(output_dir)

    logger.info('Scanning the directories you specified...')

    files = get_record_filepaths(video_dir)
    files.extend(get_record_filepaths(audio_dir))

    logger.info('Found %d recordings (full list in the log).' % len(files))
    logger.debug('Recordings found:\n%s' % '\n'.join(files))

    logger.info('Grouping the recordings...')

    organizer = DateBasedOrganizer()
Example #25
                    help='Location of the noise')
parser.add_argument('--poison_rate', default=1.0, type=float)
parser.add_argument('--perturb_tensor_filepath', default=None, type=str)
args = parser.parse_args()

# Set up Experiments
if args.exp_name == '':
    args.exp_name = 'exp_' + str(datetime.datetime.now())

exp_path = os.path.join(args.exp_name, args.version)
log_file_path = os.path.join(exp_path, args.version)
checkpoint_path = os.path.join(exp_path, 'checkpoints')
checkpoint_path_file = os.path.join(checkpoint_path, args.version)
util.build_dirs(exp_path)
util.build_dirs(checkpoint_path)
logger = util.setup_logger(name=args.version, log_file=log_file_path + ".log")

# CUDA Options
logger.info("PyTorch Version: %s" % (torch.__version__))
if torch.cuda.is_available():
    torch.cuda.manual_seed(args.seed)
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    device = torch.device('cuda')
    device_list = [
        torch.cuda.get_device_name(i)
        for i in range(0, torch.cuda.device_count())
    ]
    logger.info("GPU List: %s" % (device_list))
else:
    device = torch.device('cpu')
Example #26
def train():
    args = parser.parse_args()
    print(args.gpu)
    log = {}
    setup_logger('train_log', r'train_log')
    log['train_log'] = logging.getLogger('train_log')
    save_model_dir = "/home/newuhe/UR5_control/trained_model/"
    n_epochs = 1000000
    save_time = 500
    gpu = False

    generator = dataGenerator()
    model = locNet()
    if args.load is True:
        saved_state = torch.load(args.load_model_dir + str(args.epoch) +
                                 '.dat')
        model.load_state_dict(saved_state)
        print("load succssfully")
    model.train()

    if args.gpu is True:
        print("gpu used")
        model = model.cuda()
    criterion = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)
    losses = np.zeros(n_epochs)

    for epoch in range(args.epoch, n_epochs):
        ## one sequence
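        # p = picture frames, f = force readings, a = actions, t = target positions for one sequence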
        p, f, a, t = generator.train_data()
        pic = Variable(torch.from_numpy(np.array(p)).float())
        pic = pic.permute(0, 3, 1, 2)  # (10,3,120,160)
        force = Variable(torch.from_numpy(np.array(f)).float())  # (10,1,6)
        action = Variable(torch.from_numpy(np.array(a)).float())
        target = Variable(torch.from_numpy(np.array(t)).float())
        cx = Variable(torch.zeros(1, 1, 72))
        hx = Variable(torch.zeros(1, 1, 72))
        if args.gpu is True:
            hx = hx.cuda()
            cx = cx.cuda()
            pic = pic.cuda()
            force = force.cuda()
            action = action.cuda()
            target = target.cuda()
        model.zero_grad()

        # for pic, force, action, target in train_data:
        #     pic = Variable(torch.from_numpy(np.array([pic.tolist(),])).float())
        #     pic = pic.permute(0,3,1,2)
        #     force = Variable(torch.from_numpy(np.array([force,])).float())
        #     action = Variable(torch.from_numpy(np.array([action,])).float())
        #     target = Variable(torch.from_numpy(np.array([target,])).float())

        pos, (hx, cx) = model((pic, force, action, (hx, cx)))
        loss = criterion(pos, target)
        #loss = criterion(pos + action, target)
        # loss.backward(retain_graph=True)
        loss.backward()
        optimizer.step()
        if epoch % 500 == 0:
            log['train_log'].info('epoch = %d,train_loss = %0.5f' %
                                  (epoch, loss))

        if epoch % save_time == 0:
            ## save model and eval
            state_to_save = model.state_dict()
            torch.save(state_to_save,
                       '{0}{1}.dat'.format(save_model_dir, epoch))
            print("get: ", pos)
            print("target: ", target)

            pt, ft, at, tt = generator.test_data()
            pict = Variable(torch.from_numpy(np.array(pt)).float())
            pict = pict.permute(0, 3, 1, 2)  # (10,3,120,160)
            forcet = Variable(torch.from_numpy(
                np.array(ft)).float())  # (10,1,6)
            actiont = Variable(torch.from_numpy(np.array(at)).float())
            targett = Variable(torch.from_numpy(np.array(tt)).float())
            cxt = Variable(torch.zeros(1, 1, 72))
            hxt = Variable(torch.zeros(1, 1, 72))
            if args.gpu is True:
                hxt = hxt.cuda()
                cxt = cxt.cuda()
                pict = pict.cuda()
                forcet = forcet.cuda()
                actiont = actiont.cuda()
                targett = targett.cuda()
            post, (hxt, cxt) = model((pict, forcet, actiont, (hxt, cxt)))
            losst = criterion(post, targett)

            # for pic, force, action, target in test_data:
            #     pic = Variable(torch.from_numpy(np.array([pic.tolist(),])).float())
            #     pic = pic.permute(0,3,1,2)
            #     force = Variable(torch.from_numpy(np.array([force,])).float())
            #     action = Variable(torch.from_numpy(np.array([action,])).float())
            #     target = Variable(torch.from_numpy(np.array([target,])).float())
            #     if args.gpu==True:
            #         pic = pic.cuda()
            #         force = force.cuda()
            #         ation = action.cuda()
            #         target = target.cuda()
            #     pos, (hx, cx) = model( (pic, force, action,(hx, cx)) )

            #     loss = criterion(pos, target)
            #     test_loss.append(loss.data[0])
            log['train_log'].info('epoch = %d,test_loss = %0.5f' %
                                  (epoch, losst))
Example #27
            'nb_actions': nb_actions,
            'args': args,
        }

    agent = WolpertingerAgent(**agent_args)

    if args.load:
        agent.load_weights(args.load_model_dir)

    if args.gpu_ids[0] >= 0 and args.gpu_nums > 0:
        agent.cuda_convert()

    # set logger, log args here
    log = {}
    if args.mode == 'train':
        setup_logger('RS_log', r'{}/RS_train_log'.format(args.save_model_dir))
    elif args.mode == 'test':
        setup_logger('RS_log', r'{}/RS_test_log'.format(args.save_model_dir))
    else:
        raise RuntimeError('undefined mode {}'.format(args.mode))
    log['RS_log'] = logging.getLogger('RS_log')
    d_args = vars(args)
    d_args['max_actions'] = args.max_actions
    for key in agent_args.keys():
        if key == 'args':
            continue
        d_args[key] = agent_args[key]
    for k in d_args.keys():
        log['RS_log'].info('{0}: {1}'.format(k, d_args[k]))

    if args.mode == 'train':
Example #28
# Imports
import argparse
import util
import os, random
import json
import torch

#Configure log
logging = util.setup_logger(__name__, 'app.log') 

# Configure ArgumentParser 
parser = argparse.ArgumentParser(description = 'Predict the different species of flowers.')

parser.add_argument('img_path', action = 'store', help = 'Directory with images for predict.')
parser.add_argument('checkpoint_file', action = 'store', help = 'checkpoint file.')
parser.add_argument('--gpu', action='store_true', help='use gpu to infer classes')
parser.add_argument('--topk', action = 'store', dest = 'topk', type=int, default = 5, required = False, help = 'Return top K most likely classes')
parser.add_argument('--category_names', action='store', help='Label mapping file')

arguments = parser.parse_args()

try:
    # Use GPU if it's available
    #device = util.choose_device(arguments.gpu)
    
    #loads a checkpoint and rebuilds the model
    model = util.load_checkpoint(arguments.checkpoint_file)
    model.eval()
    
    #Image Preprocessing
    img_file = random.choice(os.listdir(arguments.img_path))
Example #29
import json
import os
from load import Load
from cdf import CDF
from util import setup_logger

with open('config.json', 'r') as f:
    cfg = json.load(f)

proxy = '{}:{}'.format(cfg['proxyIp'], str(cfg['proxyPort']))

PROXYDICT = {"http": proxy, "https": proxy}

urls = cfg['urls']

setup_logger('', console=True)

load = Load(cfg['numClients'], cfg['reqsPerClient'], cfg['iterations'],
            PROXYDICT, urls)
load.run()

cdf = CDF(PROXYDICT, urls)
cdf.run(cfg['numRequests'])
Example #30
    parser.add_argument(action="store",
                        dest='output_path',
                        type=str,
                        metavar='PATH')
    parser.add_argument('-c',
                        '--config',
                        action="store",
                        dest='config_file',
                        default='/ARD_Clip.conf',
                        required=False,
                        type=str,
                        metavar='PATH')
    return vars(parser.parse_args())


if __name__ == '__main__':
    args = parse_cli()
    conf = config.read_config(args['config_file'])
    setup_logger(level='debug' if conf.debug else 'info')

    logger.info('******************Start************')
    logger.info('             DB connection: %s', conf.connstr)
    logger.info("             Version: %s", conf.version)
    logger.info("             Debug: %s", conf.debug)
    logger.info('segment: %s', args['segment'])
    logger.info('output path: %s', args['output_path'])

    process_segments(args['segment'], args['output_path'], conf)

    logger.info('..................Normal End............')
Example #31
    def generate(self):
        logger = util.setup_logger(INFO)
        try:
            default_conf = util.getstanza('hipchat', 'default')
            local_conf = util.getstanza('hipchat', 'hipchat')
            proxy_conf = util.setproxy(local_conf, default_conf)
            hipchat_url = local_conf[
                'url'] if 'url' in local_conf else default_conf['url']
            auth_token = local_conf[
                'authToken'] if 'authToken' in local_conf else default_conf[
                    'authToken']
            timeout = local_conf[
                'timeout'] if 'timeout' in local_conf else default_conf[
                    'timeout']
        except Exception:
            logger.error(
                'Unable to parse Config File. Check if hipchat.conf exists')
            raise Exception(
                "Unable to parse Config File. Check if hipchat.conf exists")

        headers = dict(Authorization='Bearer {0}'.format(auth_token))
        data = dict(message=None, message_format='text')
        hipchat_room_url = '{0}/v2/room/{1}/notification'.format(
            hipchat_url, self.room)
        hipchat_room_list_url = '{0}/v2/room'.format(hipchat_url)

        if not self.listrooms:
            for record in records:
                message = ''
                for key, value in record.items():
                    message += '{0}={1} '.format(key, value)
                data['message'] = message
                response = util.request(hipchat_room_url,
                                        data=data,
                                        headers=headers,
                                        timeout=timeout,
                                        proxy=proxy_conf)
                logger.info(
                    'sending notification room={0} status_code={1} response={2}'
                    .format(self.room, response['code'], response['msg']))
                record['status_code'] = response['code']
                record['response'] = response['msg']
                record['_raw'] = util.tojson(response)
                yield record
        else:
            while hipchat_room_list_url:
                response = util.request(hipchat_room_list_url,
                                        headers=headers,
                                        timeout=timeout,
                                        proxy=proxy_conf)
                if response['code'] == 200:
                    room_list = json.loads(response['msg'])
                    hipchat_room_list_url = room_list['links'][
                        'next'] if 'next' in room_list['links'] else None
                    for room in room_list['items']:
                        room_info = dict()
                        room_info['room_id'] = room['id']
                        room_info['room_name'] = room['name']
                        room_info['_raw'] = util.tojson(room_info)
                        yield room_info
                else:
                    yield response
        exit()
Example #32
# --------- #
# Constants #
# --------- #

PRODUCTS = {
    "CMS": "Cisco Meeting Server",
    "CMA": "Cisco Meeting App",
    "CMM": "Cisco Meeting Manager"
}

SWDLFILE = r'data\SWDL_data.xlsx'
SWDLSHEET = r'SWDownloads-123'

# setup log
swdllog = util.setup_logger("swdllog", "swdllog.log")


#-------------------------------------------------------------
# Import data from a defined sheet in a given Excel workbook
# - returns DataFrame structure
#-------------------------------------------------------------
def import_from_excel(xlfile, xlsheet):

    import_df = None

    if not xlfile or not xlsheet:
        swdllog.error("Excel filename and sheetname required for import")
        return import_df

    try:
Example #33
from flask import Flask
from flask import request
from multiprocessing import Process

import util
from conf import config
from main import notify_rt_oneshot

app = Flask(__name__)
logger = util.setup_logger()


@app.route("/")
def hello():
    logger.debug('123')
    return "Hello World!"


@app.route("/bus_watcher", methods=['POST'])
def bus_watcher():
    data = request.json
    route = data['route']
    dep_station = data['dep_station']
    at = data['at']

    t = Process(target=notify_rt_oneshot, args=(route, dep_station, at))
    t.daemon = True
    t.start()

    return "Enabled notify for bus %s from %s when it's at %s" % (
        route, dep_station, at)
Example #34
import json

import requests

import util

log = util.setup_logger(__name__)


class CoinCap(object):
    """
    A class which manages connections to the CoinMarketCap.com API
    """
    def __init__(self):
        self.url = "https://api.coinmarketcap.com/v1/ticker/"

    def get_coin_values_usd(self, coin_name_array, portfolio):
        """
        For a dictionary that maps coins to their amounts, returns a
        dictionary that maps those coins to their corresponding values in USD.
        """
        price_data = self.get_coin_price_data(coin_name_array)
        d = {}
        for symb, amount in portfolio.items():
            for api_coin, price_dict in price_data.items():
                if price_dict["symbol"] == symb:
                    d[symb] = amount * float(price_dict["price"])
                    break
        return d

    def get_coin_names(self, count):
Example #35
    def generate(self):
        # Parse and set arguments
        logger = util.setup_logger(INFO)

        # get config
        env = self.env.lower() if self.env else 'production'
        conf = util.getstanza('getsnow', env)
        proxy_conf = util.getstanza('getsnow', 'global')
        proxies = util.setproxy(conf, proxy_conf)
        username = conf['user']
        password = conf['password']
        url = conf['url']

        user_query = '%s/api/now/table/%s?sysparm_query=user_name=%s' % (url, 'sys_user', self.user_name)

        response = util.request(user_query,
                                username=username,
                                password=password,
                                headers={'Accept': 'application/json'}
                                )

        if response['code'] == 200:
            records = json.loads(response['msg'])
            # for each event creating dic object for yield
            for record in records['result']:
                record['_time'] = time.mktime(datetime.datetime.strptime(record['sys_created_on'], "%Y-%m-%d %H:%M:%S").timetuple())
                record['url'] = url
                if record['manager']['link']:
                    response = util.request(record['manager']['link'],
                                            username=username,
                                            password=password,
                                            headers={'Accept': 'application/json'}
                                            )
                    manager = json.loads(response['msg'])['result']
                    record['manager'] = manager['name']
                    record['manager_email'] = manager['email']
                    record['manager_phone'] = manager['phone']
                if record['u_office']['link']:
                    response = util.request(record['u_office']['link'],
                                            username=username,
                                            password=password,
                                            headers={'Accept': 'application/json'}
                                            )
                    office = json.loads(response['msg'])['result']
                    record['office_number'] = office['u_office_number']
                if record['location']['link']:
                    response = util.request(record['location']['link'],
                                            username=username,
                                            password=password,
                                            headers={'Accept': 'application/json'}
                                            )
                    location = json.loads(response['msg'])['result']
                    record['office_name'] = location['full_name']
                if record['department']['link']:
                    response = util.request(record['department']['link'],
                                            username=username,
                                            password=password,
                                            headers={'Accept': 'application/json'}
                                            )
                    department = json.loads(response['msg'])['result']
                    record['department'] = department['name']

                # removing unnecessary keys
                record.pop('sys_domain', None)
                record.pop('u_office', None)
                record.pop('company', None)
                record.pop('u_organization_group', None)
                record.pop('u_title', None)
                record.pop('ldap_server', None)
                record.pop('cost_center', None)
                user_sysid = record['sys_id']
                record['sourcetype'] = 'snow:user'
                record['source'] = user_query
                # adding _raw to record
                record['_raw'] = util.tojson(record)
                record['_time'] = time.mktime(datetime.datetime.strptime(record['sys_created_on'], "%Y-%m-%d %H:%M:%S").timetuple())

                #yielding record
                yield record

                # building query string incidents
                time_range = 'opened_at>=javascript:gs.daysAgo(%s)^' % self.daysAgo if self.daysAgo else ''
                incident_query = '%s/api/now/table/%s?sysparm_query=%sopened_by=%s' % (url, 'incident', time_range, user_sysid)
                response = util.request(incident_query,
                                        username=username,
                                        password=password,
                                        headers={'Accept': 'application/json'}
                                        )

                if response['code'] == 200:
                    incidents = json.loads(response['msg'])['result']

                    # replacing all sys_id with user_names
                    for incident in incidents:
                        incident = keyreplace(incident, 'closed_by', 'user_name', username, password)
                        incident = keyreplace(incident, 'opened_by', 'user_name', username, password)
                        incident = keyreplace(incident, 'assigned_to', 'user_name', username, password)
                        incident = keyreplace(incident, 'resolved_by', 'user_name', username, password)
                        incident = keyreplace(incident, 'caller_id', 'user_name', username, password)
                        incident = keyreplace(incident, 'u_opened_for', 'user_name', username, password)
                        incident = keyreplace(incident, 'assignment_group', 'name', username, password)
                        incident['source'] = incident_query
                        incident['sourcetype'] = 'snow:incident'
                        incident['_time'] = time.mktime(datetime.datetime.strptime(incident['sys_created_on'], "%Y-%m-%d %H:%M:%S").timetuple())
                        incident['_raw'] = util.tojson(incident)

                        # removing unnecessary keys
                        incident.pop('company', None)
                        incident.pop('location', None)

                        # yield incident record
                        yield incident
                else:
                    try:
                        # If not 200 status_code showing error message in Splunk UI
                        record = util.dictexpand(response)
                        record['url'] = url
                        record['_raw'] = util.tojson(response)
                    except Exception as e:
                        record = dict()
                        record['url'] = url
                        record['error'] = e
                        record['_raw'] = util.tojson(response)
                    yield record

                asset_query = '%s/api/now/table/%s?sysparm_query=assigned_to=%s' % (url, 'alm_asset', user_sysid)
                response = util.request(asset_query,
                                        username=username,
                                        password=password,
                                        headers={'Accept': 'application/json'}
                                        )
                if response['code'] == 200:
                    assets = json.loads(response['msg'])['result']

                    for asset in assets:
                        asset.pop('support_group', None)
                        asset.pop('department', None)
                        asset.pop('model', None)
                        asset.pop('ci', None)
                        asset.pop('company', None)
                        asset.pop('location', None)
                        asset.pop('model_category', None)
                        asset.pop('cost_center', None)
                        asset.pop('sys_domain', None)
                        asset['source'] = asset_query
                        asset['_time'] = time.mktime(datetime.datetime.strptime(asset['sys_created_on'], "%Y-%m-%d %H:%M:%S").timetuple())
                        asset['sourcetype'] = 'snow:asset'
                        asset['_raw'] = util.tojson(asset)
                        yield asset

                else:
                    try:
                        # If not 200 status_code showing error message in Splunk UI
                        record = util.dictexpand(response)
                        record['url'] = url
                        record['_raw'] = util.tojson(response)
                    except Exception as e:
                        record = dict()
                        record['url'] = url
                        record['error'] = e
                        record['_raw'] = util.tojson(response)
                    yield record

        else:
            try:
                # If not 200 status_code showing error message in Splunk UI
                record = util.dictexpand(response)
                record['url'] = url
                record['_raw'] = util.tojson(response)
            except Exception as e:
                record = dict()
                record['url'] = url
                record['error'] = e
                record['_raw'] = util.tojson(response)
            yield record