Example #1
	def program_cleanup(sig_num, frame):
		logger = log.getLogger("upq")
		logger.info("Shutting down socket server...")
		server.shutdown()
		logger.info("Disconnecting from DB...")
		upqdb.UpqDB().cleanup()
		log.getLogger("upq").info("Good bye.")
		sys.exit(0)
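A handler like this only takes effect once it is registered; a minimal wiring sketch (program_cleanup, server and upqdb are assumed from the surrounding module):

import signal

# Hypothetical registration: route SIGINT and SIGTERM to the cleanup
# handler so Ctrl+C and `kill` both shut the service down cleanly.
signal.signal(signal.SIGINT, program_cleanup)
signal.signal(signal.SIGTERM, program_cleanup)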
Example #2
 def __init__(self):
     super(Application, self).__init__(sys.argv)
     self.mainwin = None
     self.log_window = None
     self.statusBar = None
     self.progressBar = None
     self.splash = None
     self.messages = None
     self.g_timers = {}
     self.logger_async = log.getLogger('mh.callAsync')
     self.logger_redraw = log.getLogger('mh.redraw')
     self.logger_event = log.getLogger('mh.event')
Example #3
 @property  # api_uri is read as a plain attribute in get_active_players (Example #13)
 def api_uri(self):
     if not self._api_uri:
         logger = getLogger('tribe-scraper.ark')
         logger.debug('First time: Getting {} api_uri_format from firebase'.format(self))
         api_uri_format = data.get_firebase_value('/configuration/api-uri-format')
         self._api_uri = api_uri_format.format(self.server_uri)
     return self._api_uri
Example #4
	def __init__(self, conf):
		self._user = conf.get('user')
		self._password = conf.get('password')
		self._maildir = conf.get('maildir')
		self._mailserver = conf.get('mailserver')
		self._port = 993
		self._logger = log.getLogger(self.__module__)
Example #5
 def _load(self, name):
     logger = log.getLogger('mh.load')
     logger.debug('loading target %s', name)
     try:
         self._load_binary(name)
     except StandardError:  # Python 2 idiom; StandardError was removed in Python 3
         self._load_text(name)
Example #6
    def _p_resolveConflict(self, old, store, new):

        try:
            resolved_data = resolveDict(old['data'], store['data'], new['data'])
        except:
            log.debug('Error during conflict resolution for %r' % self,
                          exc_info=True)
            raise
            
        resolved = new.copy()
        resolved['data'] = resolved_data
        
        if log.getLogger().level <= log.DEBUG:
            deleted = set(new['data']) - set(resolved_data)
            added = set(resolved_data) - set(new['data'])
            changed = set(key for key in resolved_data \
                          if key in new['data'] and key in resolved_data \
                          and resolved_data[key] != new['data'][key])
            parts = ['Conflict resolved for %r' % self]
            if deleted:
                parts.append('key %s deleted' % \
                             ', '.join(repr(k) for k in deleted))
            if added:
                parts.append('key %s added' % \
                             ', '.join(repr(k) for k in added))
            if changed:
                parts.append('key %s changed' % \
                             ', '.join(repr(k) for k in changed))
            log.debug(', '.join(parts))

        return resolved
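The debug block above computes a three-way key diff with plain set arithmetic; the same technique as a standalone sketch:

def dict_key_diff(old, new):
    """Return (deleted, added, changed) key sets between two dicts."""
    deleted = set(old) - set(new)
    added = set(new) - set(old)
    changed = set(k for k in new if k in old and new[k] != old[k])
    return deleted, added, changed

print(dict_key_diff({'a': 1, 'b': 2}, {'b': 3, 'c': 4}))  # ({'a'}, {'c'}, {'b'})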
Example #7
 def getMapping(self):
     mapping = {}
     for config in self.configManager.configs:
         logger = log.getLogger(config=config)
         virt = Virt.fromConfig(logger, config)
         mapping[config.name or 'none'] = self._readGuests(virt)
     return mapping
Example #8
    def __init__(self, shortcuts):
        self.logger = log.getLogger("storytext record")
        # Store events we don't record at the top level, usually controls on recording...
        self.eventsBlockedTopLevel = []
        self.scripts = []
        self.comments = []
        self.processId = os.getpid()
        self.applicationEvents = OrderedDict()
        self.supercededAppEventCategories = {}
        self.suspended = 0
        self.realSignalHandlers = {}
        self.origSignal = signal.signal
        self.signalNames = {}
        self.stateChangeEventInfo = {}
        self.delayedEvents = []
        self.applicationEventLock = Lock()
        self.hasAutoRecordings = False
        recordScript = os.getenv("USECASE_RECORD_SCRIPT")
        if recordScript:
            self.addScript(recordScript, shortcuts)
            if os.pathsep != ";": # Not windows! os.name and sys.platform don't give this information if using Jython
                self.addSignalHandlers()

        for entry in dir(signal):
            if entry.startswith("SIG") and not entry.startswith("SIG_"):
                number = getattr(signal, entry)
                self.signalNames[number] = entry
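On Python 3 the same number-to-name mapping is available without scanning dir(); a sketch using the signal.Signals enum (Python 3.5+):

import signal

# Equivalent of the loop above: Signals is an IntEnum of the SIG*
# constants, excluding the SIG_DFL/SIG_IGN handler sentinels.
signal_names = {s.value: s.name for s in signal.Signals}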
Example #9
def main():
    logger = getLogger('tribe-scraper.main')
    ark = Ark('FernGully')
    for p in get_players():
        try:
            submit_player(p, ark)
        except Exception as e:
            logger.error(e)
Example #10
 def _load(self, name):
     logger = log.getLogger('mh.load')
     logger.debug('loading target %s', name)
     try:
         self._load_binary(name)
     except Exception as e:
         self._load_text(name)
     logger.debug('loaded target %s', name)
Example #11
	def check_delete(self):
		if (self.started and time.time() - self.started > 3600*24*3) or (self.finished and time.time() - self.finished > 3600 and not self.isActive()):
			if not os.path.exists(config.LOG_DIR + "/tasks"):
				os.makedirs(config.LOG_DIR + "/tasks")
			logger = log.getLogger(config.LOG_DIR + "/tasks/%s"%self.id)
			logger.lograw(str(self.dict(True)))
			logger.close()
			del processes[self.id]
Example #12
 def test_logger():
     logger = log.getLogger('main.test')
     adapter = TaskLoggerAdapter(logger)
     t = Task()
     adapter.set_task(t)
     adapter.info('hi')
     adapter.info('test2')
     print t.output
Example #13
 def get_active_players(self):
     logger = getLogger('tribe-scraper.ark')
     logger.info('Getting player list from {}'.format(self.api_uri))
     response = requests.get(self.api_uri)
     response.raise_for_status()
     players = [Player(**p) for p in response.json()['players']]
     logger.info('{}: {} player(s)'.format(response, len(players)))
     return players
Example #14
 def __init__(self, exe, cmd, cwd, env=None, outcome=0, decode=True):
     self.cmd = cmd
     self.cmd.insert(0, exe)
     self.cwd = cwd
     self.env = env
     self.decode = decode
     self.outcome = outcome
     self.logger = getLogger("sub-process(" + exe + ")")
Example #15
def patch_firebase_value(fb_path, value):
    logger = getLogger('tribe-scraper.data')
    uri = FIREBASE_URI + fb_path + '.json'
    logger.info('Patching Firebase value at "{}"'.format(uri))
    response = requests.patch(uri, value)
    response.raise_for_status()
    value = response.json()
    logger.info('{}'.format(value))
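Note that requests.patch's second positional argument is data, which form-encodes a dict, while the Firebase REST API expects a JSON body. A hedged variant passing json= instead:

import requests

# Sketch (FIREBASE_URI as above): send the payload as JSON rather than
# form-encoded data.
def patch_firebase_value_json(fb_path, value):
    uri = FIREBASE_URI + fb_path + '.json'
    response = requests.patch(uri, json=value)
    response.raise_for_status()
    return response.json()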
Example #16
def get_firebase_value(fb_path):
    logger = getLogger('tribe-scraper.data')
    uri = FIREBASE_URI + fb_path + '.json'
    logger.info('Fetching Firebase value at "{}"'.format(uri))
    response = requests.get(uri)
    response.raise_for_status()
    value = response.json()
    logger.info('{}: "{}"'.format(response, value))
    return value
Example #17
 def __init__(self, replayScript, shortcutManager):
     self.replayScript = replayScript
     self.currentShortcuts = []
     self.commandsForMatch = []
     self.commandsForMismatch = []
     self.visitedShortcuts = []
     self.logger = log.getLogger("Shortcut Tracker")
     self.shortcutManager = shortcutManager
     self.reset()
Example #18
	def __init__(self):
		
		self._conf = config()
		self._git_list = self._conf.get('git').split(',')
		self._logger = log.getLogger(self.__module__)
		self._patchdir = self._conf.get('patchdir')
		self._git_dir = self._conf.get('gitdir')
		# mail object
		self._mailobj = mail(self._conf)
Example #19
def getProperAnchor(anchor):

	rv = ""
	try:
		pieces = anchor.split("||")
		min_diff = 100
		for piece in pieces:
			try:
				candi = ""
				if len(piece) > 150:
					continue
				elif piece.startswith("http://"):
					continue
				elif piece.startswith("by "):
					continue
				elif len(piece) < 15:
					if piece.lower().find("comment") >= 0:
						continue
					for c in piece.strip():
						if not c in string.punctuation :
							candi += c
					try:
						int(candi)
					except Exception:
						pass
					else:
						continue
					if "".join(candi.split()) in ["조회수","추천수", "이전","다음", "새창", "Offline", "인증메일재발송", "제목순", "날짜순", "인기순", "좋아요순", "ReadMore", "조회", "등록일", "상위항목", "최신순", "정확도순", "평점순", "조회순"]:
						continue
					else:
						candi = piece
				else:
					candi = piece
				
				t_diff = abs(len(candi) - 45)
				if t_diff < min_diff:
					if len(rv) > 0 :
						if abs(len(rv) - 45) > t_diff:
							rv = candi
					else:
						rv = candi
			except Exception as msg:
				getLogger().error(msg)
Example #20
 def __init__(self, profile, testMode=False):
     self.repo_dir = profile.get("git") 
     if not os.path.exists(self.repo_dir):
         raise Exception("Git repo " + self.repo_dir + " does not exist!")
     if not os.path.exists(os.path.join(self.repo_dir,'.git')):
         raise Exception(self.repo_dir + " is not a git repo! -- no .git folder found")
     self.logger = getLogger("Git")
     #self.versionCheck()
     self.testMode = testMode
     if self.testMode:
         self.logger.info("Test mode enabled")
Example #21
	def run(self):
		for key,value in self.jobdata.items():
			msg=self.jobdata['job']['msgstr']
			if key=="mail":
				if self.jobdata['success']:
					subject="Success"
				else:
					subject="Error"
				log.getLogger().debug("notify mail(value, msg, subject)=mail(%s, %s, %s)", value, msg, subject)
				self.mail(value, msg, subject)
			elif key=="syslog":
				log.getLogger().debug("notify syslog")
				self.syslog(self.jobdata['success'], msg)
			elif key == "retry":
				self._retrywait(value, 0)
			elif key == "retrywait":
				try:
					# value looks like this:
					# retries|time e.g.: 3|1h  or  4|30m  or  1|10s
					tries,waittime = value.split('|')
					tries = int(tries)
					mul1  = int(waittime[:-1])
					mul2 = waittime[-1:]
					if	mul2 == "s": multi = 1
					elif  mul2 == "m": multi = 60
					elif  mul2 == "h": multi = 3600
					else: raise Exception()
				except:
					log.getLogger().error("notify retrywait:%s", value)
					raise ValueError("retrywait -> tries|time -> int|int<s|m|h>")
				self._retrywait(tries, mul1*multi)
		return True
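The retrywait branch above parses values of the form tries|time; the same parsing as a standalone helper, assuming the s/m/h units documented in the comment:

def parse_retrywait(value):
    """Parse 'tries|time' strings such as '3|1h', '4|30m' or '1|10s'."""
    tries, waittime = value.split('|')
    multipliers = {'s': 1, 'm': 60, 'h': 3600}
    return int(tries), int(waittime[:-1]) * multipliers[waittime[-1]]

assert parse_retrywait('4|30m') == (4, 1800)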
Example #22
    def __init__(self, device = 0, save_dir = "./"):
        self.log_file = "capimges.log"

        self._logger = log.getLogger(self.log_file)
		
        self._saved_dir = save_dir if save_dir else "./"
        self._saved_path = ""
		
        self._cap = cv.CaptureFromCAM(device)
        cv.SetCaptureProperty(self._cap, cv.CV_CAP_PROP_FRAME_HEIGHT, \
                                      240)
        cv.SetCaptureProperty(self._cap, cv.CV_CAP_PROP_FRAME_WIDTH, \
                                      320)
Example #23
    def __init__(self, httpMsg):
        """
        >>> reqText = "GET /path/to/my/eventsource?arg1=1&arg2=2 HTTP/1.1\\r\\nheader: 3\\r\\n\\r\\n"
        >>> req = SimpleHTTPRequest(reqText)
        >>> req.path, req.args, req.method, req.version, req.headers
        ('/path/to/my/eventsource', {'arg1': '1', 'arg2': '2'}, 'GET', (1, 1), {'header': '3'})
        """

        self.log = customLog.getLogger('Processing HTTP request')
        try:
            self.parse(httpMsg)
        except:
            self.set_error(400, "Bad Request")
Example #24
    def start(self, application, default_port):
        """
        Run a WSGI server with the given application.
        """
        pgid = os.getpid()
        try:
            os.setpgid(pgid, pgid)
        except OSError:
            # NOTE: When running glance-control,
            # (glance's functional tests, for example)
            # setpgid fails with EPERM as glance-control
            # creates a fresh session, for which the newly
            # launched service becomes the leader (session
            # leaders may not change process groups)
            #
            # Running glance-(api|registry) is safe and
            # shouldn't raise error here.
            pgid = 0

        def kill_children(*args):
            """Kill the entire process group."""
            signal_sample.signal_sample(signal_sample.SIGTERM, signal_sample.SIG_IGN)
            signal_sample.signal_sample(signal_sample.SIGINT, signal_sample.SIG_IGN)
            self.running = False
            os.killpg(pgid, signal_sample.SIGTERM)

        def hup(*args):
            """
            Shuts down the server, but allows running requests to complete
            """
            signal_sample.signal_sample(signal_sample.SIGHUP, signal_sample.SIG_IGN)
            self.running = False

        self.application = application
        self.sock = get_socket(default_port)

        os.umask(0o27)  # ensure files are created with the correct privileges
        self.logger = logging.getLogger('glance.wsgi.server')

        if CONF.workers == 0:
            # Useful for profiling, test, debug etc.
            self.pool = self.create_pool()
            self.pool.spawn_n(self._single_run, self.application, self.sock)
            return
        else:
            self.logger.info('Starting %d workers' % CONF.workers)
            signal_sample.signal_sample(signal_sample.SIGTERM, kill_children)
            signal_sample.signal_sample(signal_sample.SIGINT, kill_children)
            signal_sample.signal_sample(signal_sample.SIGHUP, hup)
            while len(self.children) < CONF.workers:
                self.run_child()
Example #25
def main(BouncerSubclass):
    '''Within a module that defines a subclass of BouncerProcessManager, say FooSubclass,
    you can do this:
        if __name__ == "__main__":
            main(FooSubclass)
    which parses command line arguments, instantiates your subclass, and runs its server.'''

    _,filename,_,_,_,_ = inspect.getouterframes(inspect.currentframe())[1]
    logname = os.path.basename(filename)

    cwd = os.getcwd()

    default_config = os.path.join(cwd, "bouncer_config.json")


    parser = argparse.ArgumentParser(description='Bouncer process manager for %s' % logname)
    parser.add_argument("-c", "--config", type=str, default=default_config,
                        help="Default=%(default)s. The config file. See bouncer/bouncer_common.py for config-file format.")
    parser.add_argument("-a", "--addr", type=str, default="127.0.0.1",
                        help="Default=%(default)s. Address where the bouncer listens from")
    parser.add_argument("-p", "--port", type=int, default=3001,
                        help="Default=%(default)d. Port where the bouncer listens from")

    log.add_arguments(parser)
    args = parser.parse_args()
    logger = log.getLogger(args, name=logname)
    logger.info("Command line arguments: %s" % str(args))

    try:
        with open(args.config, "r") as f:
            pass
    except:
        logger.critical("Error: could not open config file (%s)" % args.config)
        sys.exit(1)

    if not issubclass(BouncerSubclass, BouncerProcessManager):
        raise ValueError("The given class, %s, is not a subclass of BouncerProcessManager" % BouncerSubclass)

    config_filename = args.config
    addr = args.addr
    port = args.port
    try:
        with open(config_filename) as f:
            config = Config(f)
        bpm = BouncerSubclass(config, addr, port, logger)
    except:
        logger.critical("Error while parsing config file. View bouncer/bouncer_common.py for format of config.")
        raise
    bpm.run()
Example #26
 def test_get_logger_no_config(self, open, getQueueLogger, isdir):
     open.return_value = None
     isdir.return_value = True
     queueLogger = log.QueueLogger('virtwho')
     queueLogger.logger.handlers = []
     mockQueueLogger = Mock(wraps=queueLogger)
     getQueueLogger.return_value = mockQueueLogger
     options = Mock()
     options.debug = False
     options.background = True
     options.log_file = log.DEFAULT_LOG_FILE
     options.log_dir = log.DEFAULT_LOG_DIR
     options.log_per_config = False
     log.init(options)
     main_logger = log.getLogger(name='main')
     self.assertTrue(main_logger.name == 'virtwho.main')
     self.assertTrue(len(main_logger.handlers) == 1)
     self.assertTrue(isinstance(main_logger.handlers[0], log.QueueHandler))
     queue_handlers = queueLogger.logger.handlers
     self.assertTrue(len(queue_handlers) == 1)
     self.assertEquals(queue_handlers[0].baseFilename, '%s/%s' % (log.DEFAULT_LOG_DIR, log.DEFAULT_LOG_FILE))
Example #27
    def test_get_logger_different_log_file(self, getFileHandler, getDefaultQueueLogger):
        queueLogger = log.QueueLogger('virtwho')
        queueLogger.logger.handlers = []
        mockQueueLogger = Mock(wraps=queueLogger)
        getDefaultQueueLogger.return_value = mockQueueLogger

        config = Mock()
        config.name = 'test'
        config.log_file = 'test.log'
        config.log_dir = '/test/'

        options = Mock()
        options.debug = False
        options.background = True
        options.log_per_config = True
        options.log_dir = ''
        options.log_file = ''
        test_logger = log.getLogger(options, config)

        self.assertTrue(test_logger.name == 'virtwho.test')
        self.assertTrue(len(test_logger.handlers) == 1)
        self.assertTrue(len(queueLogger.logger.handlers) == 1)
        getFileHandler.assert_called_with(test_logger.name, config.log_file, config.log_dir)
Example #28
	def __init__(self, jobname, jobdata):
		# if you add attributes to the UpqJob class that should be carried over
		# through a restart/reschedule, add it to notify_job.jobdata['job']
		# in notify(), if and only if it is (JSON)-serializable!
		self.jobname = jobname
		self.jobcfg  = UpqConfig().jobs[jobname] #settings from config-file

		# subjobs handling: if a runtime job is available, use it, else the configured ones
		if jobdata.has_key('subjobs'): #runtime set subjobs are available
			jobdata['subjobs']=jobdata['subjobs']
		elif self.jobcfg.has_key('subjobs'):
			# make copy of subjobs, as we modify them later
			jobdata['subjobs']=self.jobcfg['subjobs'][:]
		else:
			jobdata['subjobs'] = [] # no subjobs defined, initialize empty
		self.jobdata = jobdata #runtime parameters, these are stored into database and restored on re-run
		self.logger  = log.getLogger("upq")
		self.thread  = "T-none-0"
		self.jobid   = -1
		self.msgstr  = ""
		self.result  = False
		self.finished= threading.Event()
		self.retries = 0
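The comment in __init__ above requires carried-over attributes to be JSON-serializable; a small helper (not part of the original class) to check a value before stashing it in jobdata:

import json

def is_json_serializable(value):
    """True if value survives json.dumps, per the rule in the comment."""
    try:
        json.dumps(value)
        return True
    except (TypeError, ValueError):
        return False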
Example #29
	def _retrywait(self, tries, waittime):
		"""
		redo job 'tries' (int) times, waiting 'waittime' (int) seconds before each run
		"""
		if waittime: log.getLogger().info("Waiting %d seconds before retry.", waittime)
		time.sleep(waittime)
		if self.jobdata['job']['retries'] >= tries:
			log.getLogger().info("Tried %d times, no more retries.", self.jobdata['job']['retries'])
			return False
		else:
			# recreate job
			retry_job = UpqQueueMngr().new_job(self.jobdata['job']['jobname'], self.jobdata['job']['jobdata'])
			for key in self.jobdata['job'].keys():
				# copy data to allow release of old job resources
				setattr(retry_job, key, copy.deepcopy(self.jobdata['job'][key]))

			retry_job.retries += 1
			log.getLogger().info("retrying job '%s' for the %d. time", retry_job.jobname, retry_job.retries)
			UpqQueueMngr().enqueue_job(retry_job)
Example #30
Email: [email protected]

Copyright 2021 - 2021 bluestone tech

'''

import uos
import log
import ujson
import _thread

from usr import bluestone_common

log.basicConfig(level=log.INFO)
_config_log = log.getLogger("CONFIG")


class BluestoneConfig(object):
    inst = None

    def __init__(self, file_name):
        self.lock = _thread.allocate_lock()

        self.config_file_name = file_name
        self.config_path = 'usr:/{}'.format(self.config_file_name)
        self.restart_key_list = [
            'mqtt_tencent', 'socket', 'timer0', 'timer1', 'timer2', 'timer3'
        ]
        self.key_list = [
            'uart0', 'uart1', 'uart2', 'mqtt_tencent', 'socket', 'timer0',
Example #31
import os
import re
import shlex
import subprocess
import sys
import time

import rpm
import system_config_keyboard.keyboard

import base, log
import process
from mplatform.utils import parse_varfile
from fs import File

LOGGER = log.getLogger(__name__)


def reboot():
    """Reboot the system
    """
    process.call(["reboot"])


def async_reboot(delay=3):
    reboot_task = Reboot()
    reboot_task.reboot(delay)


def poweroff():
    """Poweroff the system
Example #32
import yaml
import netaddr
import os
import log as logging

LOG = logging.getLogger("net-recover")
config_path = os.path.join(os.path.dirname(__file__), "network.cfg")


def setup_bondings(bond_mappings):
    print bond_mappings


def add_ovs_port(ovs_br, ifname, uplink, vlan_id=None):
    LOG.info("add_ovs_port enter")
    cmd = "ovs-vsctl --may-exist add-port %s %s" % (ovs_br, ifname)
    if vlan_id:
        cmd += " tag=%s" % vlan_id
    cmd += " -- set Interface %s type=internal;" % ifname
    cmd += "ip link set dev %s address `ip link show %s |awk '/link\/ether/{print $2}'`;" % (
        ifname, uplink)  # noqa
    cmd += "ip link set %s up;" % ifname
    LOG.info("add_ovs_port: cmd=%s" % cmd)
    os.system(cmd)


def setup_ips(ip_settings, sys_intf_mappings):
    LOG.info("setup_ips enter")
    for intf_info in ip_settings.values():
        network = netaddr.IPNetwork(intf_info["cidr"])
        if sys_intf_mappings[intf_info["name"]]["type"] == "ovs":
Example #33
"""

1. calibration

2. Trigger measurement

3. read data

"""

# API manual: http://qpy.quectel.com/wiki/#/zh-cn/api/?id=i2c
# AHT10 datasheet:
# https://server4.eca.ir/eshop/AHT10/Aosong_AHT10_en_draft_0c.pdf
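A sketch of the three-step flow from the docstring, written against the Linux smbus2 library rather than the QuecPython I2C API this class actually targets (commands and scaling per the AHT10 datasheet linked above):

import time
from smbus2 import SMBus

AHT10_ADDR = 0x38

with SMBus(1) as bus:
    bus.write_i2c_block_data(AHT10_ADDR, 0xE1, [0x08, 0x00])  # 1. calibration
    time.sleep(0.05)
    bus.write_i2c_block_data(AHT10_ADDR, 0xAC, [0x33, 0x00])  # 2. trigger measurement
    time.sleep(0.08)
    data = bus.read_i2c_block_data(AHT10_ADDR, 0x00, 6)       # 3. read data
    raw_rh = (data[1] << 12) | (data[2] << 4) | (data[3] >> 4)
    raw_t = ((data[3] & 0x0F) << 16) | (data[4] << 8) | data[5]
    print(raw_rh / 2.0 ** 20 * 100, raw_t / 2.0 ** 20 * 200 - 50)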

log.basicConfig(level=log.INFO)
_i2c_log = log.getLogger("AHT10")


class BluestoneTemperature(object):
    inst = None

    def __init__(self):
        BluestoneTemperature.inst = self

        self.i2c_dev = None
        self.i2c_addr = None

        # Initialization command
        self.AHT10_CALIBRATION_CMD = 0xE1

        # Trigger measurement
Example #34
 def __init__(self, distortions=True, num_parallel_calls=3):
     self.n_parallel = num_parallel_calls
     self.distortions = distortions
     self.logger = log.getLogger('database')
     self.loaders = list(self.DATASETS.values())
Example #35
 def __init__(self):
     self.logger = log.getLogger('model')
     self.model_dir = 'models/cnn_v13_94x94'
     self.optimizer = tf.train.AdamOptimizer(learning_rate=0.0005)
     self.intermediate_outputs = []
Example #36
from synthesis import connect_vnc
from handoff import HandoffDataRecv

from db.api import DBConnector
from db.table_def import BaseVM
from configuration import Const as Cloudlet_Const
from compression import DecompProc
from pprint import pformat
import log as logging
import random
import png
import mmap
import tool
from delta import DeltaItem

LOG = logging.getLogger(__name__)
session_resources = dict()   # dict[session_id] = obj(SessionResource)

class StreamSynthesisError(Exception):
    pass

class RecoverDeltaProc(multiprocessing.Process):
    FUSE_INDEX_DISK = 1
    FUSE_INDEX_MEMORY = 2

    def __init__(self, base_disk, base_mem,
                 decomp_delta_queue, output_mem_path,
                 output_disk_path, chunk_size,
                 fuse_info_queue, analysis_queue):
        if base_disk is None and base_mem is None:
            raise StreamSynthesisError("Need either base_disk or base_memory")
Example #37
#!/usr/bin/env python

import os
import re
import sys
import shutil
from log import getLogger
from autoprocess import autoProcessTV, autoProcessMovie, autoProcessTVSR, sonarr, radarr
from readSettings import ReadSettings
from mkvtomp4 import MkvtoMp4
import logging
from logging.config import fileConfig

log = getLogger("qBittorrentPostProcess")

log.info("qBittorrent post processing started.")

if len(sys.argv) != 7:
    log.error("Not enough command line parameters present, are you launching this from qBittorrent?")
    log.error("#Args: %L %T %R %F %N %I Category, Tracker, RootPath, ContentPath , TorrentName, InfoHash")
    log.error("Length was %s" % str(len(sys.argv)))
    log.error(str(sys.argv[1:]))
    sys.exit()

settings = ReadSettings()
label = sys.argv[1].lower()
root_path = str(sys.argv[3])
content_path = str(sys.argv[4])
name = sys.argv[5]
torrent_hash = sys.argv[6]
categories = [settings.qBittorrent['cp'], settings.qBittorrent['sb'], settings.qBittorrent['sonarr'], settings.qBittorrent['radarr'], settings.qBittorrent['sr'], settings.qBittorrent['bypass']]
Example #38
'''
@Author: Baron
@Date: 2020-06-22
@LastEditTime: 2020-06-22 17:16:20
@Description: example for module _thread
@FilePath: example_thread_file.py
'''
import _thread
import utime
import log

# Set the log output level
log.basicConfig(level=log.INFO)   
thread_log = log.getLogger("Thread")

a = 0
state = 1
# Create a lock instance
lock = _thread.allocate_lock()

def th_func(delay, id):
	global a
	global state
	while True:
		lock.acquire()  # acquire the lock
		if a >= 10:
			thread_log.info('thread %d exit' % id)
			lock.release()  # release the lock
			state = 0
			break
		a+=1
Example #39
#!/usr/bin/env python
# encoding: utf-8

import log as deployLog
import sys
from utils import *

log = deployLog.getLogger()
checkDependent = ["git", "openssl", "curl"]


def do():
    print "================================================================",
    webaseMsg = '''
              _    _     ______  ___  _____ _____ 
             | |  | |    | ___ \/ _ \/  ___|  ___|
             | |  | | ___| |_/ / /_\ \ `--.| |__  
             | |/\| |/ _ | ___ |  _  |`--. |  __| 
             \  /\  |  __| |_/ | | | /\__/ | |___ 
              \/  \/ \___\____/\_| |_\____/\____/  
    '''
    print webaseMsg
    print "================================================================"
    print "===================== envrionment check... ====================="
    installRequirements()
    checkNginx()
    checkJava()
    checkNodePort()
    checkWebPort()
    checkMgrPort()
    checkFrontPort()
Example #40
from usr import bluestone_mqtt
from usr import bluestone_mqtt_tencent
from usr import bluestone_socket
from usr import bluestone_temperature
from usr import bluestone_timer
'''
The following two global variables are required. Users can adjust their values
to match their own project; both values are printed before the user code runs.
'''
PROJECT_NAME = "Bluestone smart DTU"
PROJECT_VERSION = "1.0.0"
checknet = checkNet.CheckNetwork(PROJECT_NAME, PROJECT_VERSION)

# Set the log output level
log.basicConfig(level=log.INFO)
system_log = log.getLogger("MAIN")

state = 1
retry_count = 0
timer0 = None
wdt = None
bs_config = None
bs_data_config = None
bs_uart = None
bs_mqtt = None
bs_aht10 = None
bs_gpio = None
bs_timer = None

timer_name_list = ["timer0", "timer1", "timer2", "timer3"]
timer_list = [Timer.Timer0, Timer.Timer1, Timer.Timer2, Timer.Timer3]
Example #41
import log

formatter = log.Formatter('%(asctime)s, %(levelname)s, %(message)s')

# logging to a file
logger_plik = log.getLogger('log_do_pliku')

file_handler = log.FileHandler('z_file_handlera2.log')
file_handler.setFormatter(formatter)

logger_plik.addHandler(file_handler)
logger_plik.setLevel(log.INFO)

# logging to the screen
logger_ekran = log.getLogger('log_na_ekran')

scr_handler = log.StreamHandler()
scr_handler.setFormatter(formatter)

logger_ekran.addHandler(scr_handler)
# no level is set here, so the default applies


logger_plik.info('info message to the file')
logger_plik.warning('warning to the file')
logger_plik.critical('THIS IS BAD (to the file)')

logger_ekran.info('info message to the screen')
logger_ekran.warning('warning to the screen')
logger_ekran.critical('THIS IS BAD (to the screen)')
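One caveat in the example above: logger_ekran never gets a level, so it inherits the root logger's default WARNING threshold and the info() call is filtered out even though a handler is attached. One line makes all three screen messages appear:

logger_ekran.setLevel(log.INFO)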
Example #42
# coding:utf-8

import requests
from config import *
from util.tools import *
from log import getLogger
from util.http import send_http
from decorator import already, timer
from decorator.shop import parsed_css

logger = getLogger(__name__)


class Shop(object):
    """
    Dazhong Dianping (大众点评) shop
    """
    api = API_SHOP_DETAIL

    def __init__(self, shopId):
        self.id = shopId
        self.headers = HEADERS
        self.css_headers = CSS_HEADERS
        self.session = requests.session()
        self.url = self.api.format(id=shopId)
        self.homepage = None
        self._fetched = False
        self.css = None
        self.proxy = None
        self.css_proxy = None
        self.decrypt_dict = None
Example #43
def uploadFilesToPMRHelper(eccJar,
                           eccTop,
                           group,
                           uri,
                           files,
                           timeout=300,
                           javaHome='',
                           eccHost=None):
    logger = getLogger(__name__)

    if eccHost and isLocalHost(eccHost):
        eccHost = None

    if eccHost and files:
        files = copyFilesToECCHost(eccHost, files)
        if not files:
            logger.error(
                "Could not copy snapshot files to host %s for upload to PMR.",
                eccHost)
            return None
    elif not files:
        logger.error("No files give for upload to the PMR.")
        return None

    cmd = ['java', '-jar', os.path.join(eccTop, eccJar)]
    if javaHome:
        cmd[0] = os.path.join(javaHome, 'bin', 'java')
    cmd += [
        '-E',
        os.path.join(eccTop, 'ecchome'), '-F', group, '-U', ';'.join(files),
        '-r', uri
    ]
    if logger.getEffectiveLevel() == logging.DEBUG:
        cmd += ['-d', 'true']

    logger.info('Calling ECC client to upload file(s) to the PMR...')
    if eccHost:
        finished, stdout, stderr, returncode = util.runRemoteCommandStdOutErrCode(
            eccHost, cmd, timeout)
    else:
        finished, stdout, stderr, returncode = util.runCommandStdOutErrCode(
            cmd, timeout)

    logger.debug('ECC client exit code %d. Command output: %s', returncode,
                 stdout)
    if not finished:
        logger.warn(
            "The ECC client did not finish after the configured timeout of %d seconds.",
            timeout)
    elif returncode:
        logger.warn("The ECC client exited with exit code %d.", returncode)
    logger.debug('ECC stdout: %s, ECC stderr: %s', stdout, stderr)

    if eccHost and files:
        cleanFilesOnECCHost(files, eccHost)

    uploadedFiles = []
    for line in stdout.splitlines():
        f = parseFileUploadedLine(line)
        if f:
            uploadedFiles.append(f)
            logger.info('File %s successfully uploaded to the PMR.', f)

    if len(uploadedFiles) < len(files):
        logger.error('Failed to upload all files.')

    return uploadedFiles
Example #44
def copyFilesToECCHost(host, files):
    import paramiko
    logger = getLogger(__name__)
    filesUploaded = []
    if len(files) < 1:
        return filesUploaded

    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.load_system_host_keys()

    ssh_config = paramiko.SSHConfig()
    user_config_file = os.path.expanduser("~/.ssh/config")
    if os.path.exists(user_config_file):
        with open(user_config_file) as f:
            ssh_config.parse(f)
    host = socket.getfqdn(host)
    cfg = {'hostname': host, 'username': getpass.getuser()}
    user_config = ssh_config.lookup(cfg['hostname'])
    for k in ('hostname', 'username', 'port'):
        if k in user_config:
            cfg[k] = user_config[k]

    if 'identityfile' in user_config:
        cfg['key_filename'] = user_config['identityfile']
    if 'proxycommand' in user_config:
        cfg['sock'] = paramiko.ProxyCommand(user_config['proxycommand'])

    logger.info("Connecting to host %s", host)
    try:
        client.connect(**cfg)
    except paramiko.SSHException:
        logger.exception("Could not connect to host %s.", host)
        return filesUploaded
    except socket.error as e:
        logger.exception("Could not connect to host %s. %d:%s", host, e.errno,
                         os.strerror(e.errno))
        return filesUploaded

    # Setup sftp connection and transmit this script
    sftp = client.open_sftp()

    try:
        sftp.chdir(eccFileDir)  # Test if remote_path exists
    except IOError:
        try:
            sftp.mkdir(eccFileDir)  # Create remote_path
        except IOError:
            logger.error("Could not make directory '%s' on host %s.",
                         eccFileDir, host)
            sftp.close()
            client.close()
            return filesUploaded

    for f in files:
        destination = os.path.join(eccFileDir, os.path.basename(f))
        try:
            sftp.put(f, destination)
        except IOError:
            logger.exception("Could not upload '%s' to %s on host %s.", f,
                             destination, host)
            continue
        filesUploaded.append(destination)

    sftp.close()
    client.close()
    return filesUploaded
Example #45
def createPMRHelper(eccJar,
                    eccTop,
                    component,
                    version,
                    icn,
                    uuid,
                    contactName,
                    group,
                    phone,
                    email,
                    city,
                    country,
                    severity,
                    summary,
                    body,
                    timeout=300,
                    files=None,
                    javaHome='',
                    eccHost=None):
    logger = getLogger(__name__)

    if not files:
        files = []
    if eccHost and isLocalHost(eccHost):
        eccHost = None
    if eccHost and files:
        files = copyFilesToECCHost(eccHost, files)
        if not files:
            logger.error(
                "Could not copy snapshot files to host %s for upload to PMR.",
                eccHost)

    logger.info("Creating PMR...")
    cmd = ['java', '-jar', os.path.join(eccTop, eccJar)]
    if javaHome:
        cmd[0] = os.path.join(javaHome, 'bin', 'java')
    cmd += ['-E', os.path.join(eccTop, 'ecchome')]
    cmd += ['-i', component]
    cmd += ['-v', version]
    cmd += ['-I', icn]
    cmd += ['-u', uuid]
    cmd += ['-N', contactName]
    cmd += ['-F', group]
    cmd += ['-p', phone]
    cmd += ['-s', str(severity)]
    cmd += ['-c', city]
    cmd += ['-C', country]
    cmd += ['-T', summary]
    cmd += ['-t', body]
    cmd += ['-e', email]
    if files:
        cmd += ['-U', ';'.join(files)]
    if logger.getEffectiveLevel() == logging.DEBUG:
        cmd += ['-d', 'true']

    logger.info('Calling ECC client to generate PMR...')
    logger.debug('ECC command: %s', cmd)
    if eccHost:
        finished, stdout, stderr, returncode = util.runRemoteCommandStdOutErrCode(
            eccHost, cmd, timeout)
    else:
        finished, stdout, stderr, returncode = util.runCommandStdOutErrCode(
            cmd, timeout)
    logger.debug('ECC client exit code %d. Command output: %s', returncode,
                 stdout)
    if not finished:
        logger.warn(
            "The ECC client did not finish after the configured timeout of %d seconds.",
            timeout)
    elif returncode:
        logger.warn("The ECC client exited with exit code %d.", returncode)
    logger.debug('ECC stdout: %s, ECC stderr: %s', stdout, stderr)

    if eccHost and files:
        cleanFilesOnECCHost(files, eccHost)

    pmrNumber = ''
    uploadedFiles = []
    srid = ''
    uri = ''

    for line in stdout.splitlines():
        if line.startswith('PMR ID ='):
            pmrNumber = line.split('=', 1)[-1].strip()
            logger.info('Successfully created PMR#%s', pmrNumber)
        elif line.startswith('srid ='):
            srid = line.split('=', 1)[-1].strip()
        elif line.startswith('uri ='):
            uri = line.split('=', 1)[-1].strip()
            logger.info('The PMR uri is %s', uri)
        elif line.startswith('Description ='):
            if line.split('=', 1)[-1].strip().startswith(
                    'This request was detected as a Duplicate'):
                logger.warn('This request was detected as a duplicate PMR.')
        elif files:
            f = parseFileUploadedLine(line)
            if f:
                uploadedFiles.append(f)
                logger.info('File %s successfully uploaded to the PMR', f)

    if len(pmrNumber) == 11:
        pmrNumber = "%s,%s,%s" % (pmrNumber[0:5], pmrNumber[5:8],
                                  pmrNumber[8:11])
    if not pmrNumber:
        logger.error('Failed to generate PMR.')
    if len(uploadedFiles) < len(files):
        logger.error('Failed to upload all files')

    return pmrNumber, srid, uploadedFiles, uri
Example #46
import pandas as pd
from log import getLogger

LOG = getLogger(__name__)


class AggregateAspectScores:

    def __init__(self, df):
        self.df = df

    def aggregateScores(self):

        try:
            ## removing null scores
            self.df = self.df[self.df['score'].notnull()]

            ## normalizing the inconsistent scores
            self.df = self.df.join(pd.concat([self.df.groupby(['hotel_id', 'aspect'])[
                                   'score'].transform('min')], 1, keys=['min_score']))

            self.df = self.df.join(pd.concat([self.df.groupby(['hotel_id', 'aspect'])[
                                   'score'].transform('max')], 1, keys=['max_score']))

            self.df['normal_score'] = (
                self.df['score']-self.df['min_score'])/(self.df['max_score']-self.df['min_score'])

            grp_score = self.df.groupby(['hotel_id', 'aspect'])['score'].transform('last')

            self.df['normal_score'] = self.df['normal_score'].fillna(grp_score / 10000)
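The joins above implement a per-group min-max normalization; groupby().transform gives the same result more directly. A sketch with toy data, not the original pipeline:

import pandas as pd

df = pd.DataFrame({'hotel_id': [1, 1, 1], 'aspect': ['wifi'] * 3,
                   'score': [2.0, 4.0, 6.0]})
grp = df.groupby(['hotel_id', 'aspect'])['score']
df['normal_score'] = (df['score'] - grp.transform('min')) / \
                     (grp.transform('max') - grp.transform('min'))
print(df['normal_score'].tolist())  # [0.0, 0.5, 1.0]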
Example #47
''' Functions to query and interact with the web front end. '''

import log
logger = log.getLogger('WebFrontEnd', log.DEBUG)

import urllib2, urlparse
import socket
import json

from ZensorDevice import ZensorDevice
from Zensor import Zensor


def get_interface_address(host):
    ''' Get the address of the network interface that would be used to connect to the target. '''
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.connect((host, 12345))
    return sock.getsockname()[0]
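The connect() on a UDP socket sends no packets; it only asks the kernel to pick a route, so this works even when nothing listens on port 12345. Usage sketch:

print(get_interface_address('8.8.8.8'))  # e.g. '192.168.0.42'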


SENSOR_FREQUENCIES = {
    'EVERY_10_SECONDS': 10,
    'EVERY_30_SECONDS': 30,
    'EVERY_MINUTE': 60,
    'EVERY_2_MINUTES': 120,
    'EVERY_5_MINUTES': 300,
    'EVERY_10_MINUTES': 600,
    'EVERY_30_MINUTES': 1800,
    'EVERY_HOUR': 3600,
    'EVERY_2_HOURS': 7200,
    'EVERY_4_HOURS': 14400,
Example #48
 def __init__(self):
     self.logger = log.getLogger('autoencoder')
     self.model_dir = 'models/enc_v5'
     self.optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
     self.encoded = None
Example #49
#!/usr/bin/python
__author__ = 'https://github.com/maksteel'

import log, json
from catalog import ConsumerClient
logger = log.getLogger()

class CerConsumerClient(ConsumerClient):
    """
    Class to make CeR entitled catalog items requests to vRA
    +--------------------------------------+-----------------------------------+
    |                 Id                   |                Name               |
    +--------------------------------------+-----------------------------------+
    | e16d4f88-2620-4284-9337-3d0a447c62e4 |             CER-Linux             |
    | 4524185f-695b-46f0-af06-2b99ef2e3ec9 |            CER-Windows            |
    | f6afc33b-cf2a-43d3-b539-787b45935a94 | eResearch Cluster Resource Report |
    | 0e80c697-9650-4bb9-ae40-5c30165a62aa |  eResearch Cluster Storage Report |
    +--------------------------------------+-----------------------------------+
    """

    def __init__(self, username, password, host="m4lbvrap01.uoa.auckland.ac.nz", tenant="Research"):
        """
        Creates a connection to the UoA-ITS-CeR vRA REST API using the provided
        username and password.
        Parameters:
                    username = user account with access to the vRA portal
                    password = valid password for the above user
                    host = m4lbvrap01.uoa.auckland.ac.nz (Default)
                    tenant = Research (Default)
        """
        ConsumerClient.__init__(self, host, username, password, tenant)
Example #50
def central_agent(net_weights_qs, net_gradients_qs, stats_qs):
    logger = log.getLogger(name="central_agent", level=pm.LOG_MODE)
    logger.info("Start central agent...")

    if not pm.RANDOMNESS:
        np.random.seed(pm.np_seed)
        tf.set_random_seed(pm.tf_seed)

    config = tf.ConfigProto()
    config.allow_soft_placement = False
    config.gpu_options.allow_growth = True
    tb_logger = tb_log.Logger(pm.SUMMARY_DIR)
    log_config(tb_logger)

    with tf.Session(config=config) as sess:
        policy_net = network.PolicyNetwork(sess, "policy_net",
                                           pm.TRAINING_MODE, logger)
        if pm.VALUE_NET:
            value_net = network.ValueNetwork(sess, "value_net",
                                             pm.TRAINING_MODE, logger)
        logger.info("Create the policy network, with " +
                    str(policy_net.get_num_weights()) + " parameters")

        sess.run(tf.global_variables_initializer())
        tb_logger.add_graph(sess.graph)
        tb_logger.flush()
        policy_tf_saver = tf.train.Saver(max_to_keep=pm.MAX_NUM_CHECKPOINTS,
                                         var_list=tf.get_collection(
                                             tf.GraphKeys.GLOBAL_VARIABLES,
                                             scope='policy_net'))
        if pm.POLICY_NN_MODEL is not None:
            policy_tf_saver.restore(sess, pm.POLICY_NN_MODEL)
            logger.info("Policy model " + pm.POLICY_NN_MODEL + " is restored.")

        if pm.VALUE_NET:
            value_tf_saver = tf.train.Saver(max_to_keep=pm.MAX_NUM_CHECKPOINTS,
                                            var_list=tf.get_collection(
                                                tf.GraphKeys.GLOBAL_VARIABLES,
                                                scope='value_net'))
            if pm.VALUE_NN_MODEL is not None:
                value_tf_saver.restore(sess, pm.VALUE_NN_MODEL)
                logger.info("Value model " + pm.VALUE_NN_MODEL +
                            " is restored.")

        step = 1
        start_t = time.time()

        if pm.VAL_ON_MASTER:
            validation_traces = []  # validation traces
            tags_prefix = [
                "DRF: ", "SRTF: ", "FIFO: ", "Tetris: ", "Optimus: "
            ]
            for i in range(pm.VAL_DATASET):
                validation_traces.append(trace.Trace(None).get_trace())
            stats = comparison.compare(
                copy.deepcopy(validation_traces),
                logger)  # deep copy to avoid changes to validation_traces
            if not pm.SKIP_FIRST_VAL:
                stats.append(
                    test(policy_net,
                         copy.deepcopy(validation_traces),
                         logger,
                         step=0,
                         tb_logger=tb_logger))
                tags_prefix.append("Init_NN: ")

            f = open(LOG_DIR + "baselines.txt", 'w')
            for i in range(len(stats)):
                jct, makespan, reward = stats[i]
                value = tags_prefix[i] + " JCT: " + str(
                    jct) + " Makespan: " + str(makespan) + " Reward: " + str(
                        reward) + "\n"
                f.write(value)
                tb_logger.add_text(tag=tags_prefix[i], value=value, step=step)
            f.close()
            tb_logger.flush()
            logger.info("Finish validation for heuristics and initialized NN.")

        while step <= pm.TOT_NUM_STEPS:
            # send updated parameters to agents
            policy_weights = policy_net.get_weights()
            if pm.VALUE_NET:
                value_weights = value_net.get_weights()
                for i in range(pm.NUM_AGENTS):
                    net_weights_qs[i].put((policy_weights, value_weights))
            else:
                for i in range(pm.NUM_AGENTS):
                    net_weights_qs[i].put(policy_weights)

            # display speed
            if step % pm.DISP_INTERVAL == 0:
                elaps_t = time.time() - start_t
                speed = step / elaps_t
                logger.info("Central agent: Step " + str(step) + " Speed " +
                            '%.3f' % speed + " batches/sec" + " Time " +
                            '%.3f' % elaps_t + " seconds")

            # statistics
            if pm.TRAINING_MODE == "RL":
                policy_net.anneal_entropy_weight(step)
                tb_logger.add_scalar(tag="Entropy Weight",
                                     value=policy_net.entropy_weight,
                                     step=step)
                if pm.EPSILON_GREEDY:
                    eps = 2 / (1 +
                               np.exp(step / pm.ANNEALING_TEMPERATURE)) * 0.6
                    tb_logger.add_scalar(tag="Epsilon Greedy",
                                         value=eps,
                                         step=step)

            collect_stats(stats_qs, tb_logger, step)
            if not pm.FIX_LEARNING_RATE:
                if step in pm.ADJUST_LR_STEPS:
                    policy_net.lr /= 2
                    if pm.VALUE_NET:
                        value_net.lr /= 2
                    logger.info("Learning rate is decreased to " +
                                str(policy_net.lr) + " at step " + str(step))
            if step < pm.STEP_TRAIN_CRITIC_NET:  # set policy net lr to 0 to train critic net only
                policy_net.lr = 0.0

            if step % pm.DISP_INTERVAL == 0:
                tb_logger.add_scalar(tag="Learning rate",
                                     value=policy_net.lr,
                                     step=step)

            # save model
            if step % pm.CHECKPOINT_INTERVAL == 0:
                name_prefix = ""
                if pm.TRAINING_MODE == "SL":
                    name_prefix += "sl_"
                else:
                    name_prefix += "rl_"
                if pm.PS_WORKER:
                    name_prefix += "ps_worker_"
                else:
                    name_prefix += "worker_"

                model_name = pm.MODEL_DIR + "policy_" + name_prefix + str(
                    step) + ".ckpt"
                path = policy_tf_saver.save(sess, model_name)
                logger.info("Policy model saved: " + path)
                if pm.VALUE_NET and pm.SAVE_VALUE_MODEL:
                    model_name = pm.MODEL_DIR + "value_" + name_prefix + str(
                        step) + ".ckpt"
                    path = value_tf_saver.save(sess, model_name)
                    logger.info("Value model saved: " + path)

            # validation
            if pm.VAL_ON_MASTER and step % pm.VAL_INTERVAL == 0:
                test(policy_net, copy.deepcopy(validation_traces), logger,
                     step, tb_logger)

            # poll and update parameters
            poll_ids = set([i for i in range(pm.NUM_AGENTS)])
            avg_policy_grads = []
            avg_value_grads = []
            while True:
                for i in poll_ids.copy():
                    try:
                        if pm.VALUE_NET:
                            policy_gradients, value_gradients = net_gradients_qs[
                                i].get(False)
                        else:
                            policy_gradients = net_gradients_qs[i].get(False)
                        poll_ids.remove(i)
                        if len(avg_policy_grads) == 0:
                            avg_policy_grads = policy_gradients
                        else:
                            for j in range(len(avg_policy_grads)):
                                avg_policy_grads[j] += policy_gradients[j]
                        if pm.VALUE_NET:
                            if len(avg_value_grads) == 0:
                                avg_value_grads = value_gradients
                            else:
                                for j in range(len(avg_value_grads)):
                                    avg_value_grads[j] += value_gradients[j]
                    except:
                        continue
                if len(poll_ids) == 0:
                    break
            for i in range(0, len(avg_policy_grads)):
                avg_policy_grads[i] = avg_policy_grads[i] / pm.NUM_AGENTS
            policy_net.apply_gradients(avg_policy_grads)

            if pm.VALUE_NET:
                for i in range(0, len(avg_value_grads)):
                    avg_value_grads[i] = avg_value_grads[i] / pm.NUM_AGENTS
                value_net.apply_gradients(avg_value_grads)

            # visualize gradients and weights
            if step % pm.VISUAL_GW_INTERVAL == 0 and pm.EXPERIMENT_NAME is None:
                assert len(policy_weights) == len(avg_policy_grads)
                for i in range(0, len(policy_weights), 10):
                    tb_logger.add_histogram(tag="Policy weights " + str(i),
                                            value=policy_weights[i],
                                            step=step)
                    tb_logger.add_histogram(tag="Policy gradients " + str(i),
                                            value=avg_policy_grads[i],
                                            step=step)
                if pm.VALUE_NET:
                    assert len(value_weights) == len(avg_value_grads)
                    for i in range(0, len(value_weights), 10):
                        tb_logger.add_histogram(tag="Value weights " + str(i),
                                                value=value_weights[i],
                                                step=step)
                        tb_logger.add_histogram(tag="Value gradients " +
                                                str(i),
                                                value=avg_value_grads[i],
                                                step=step)

            step += 1

        logger.info("Training ends...")
        if pm.VALUE_NET:
            for i in range(pm.NUM_AGENTS):
                net_weights_qs[i].put(("exit", "exit"))
        else:
            for i in range(pm.NUM_AGENTS):
                net_weights_qs[i].put("exit")
        # os.system("sudo pkill -9 python")
        exit(0)
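The polling loop above sums each agent's gradient list element-wise and divides by the number of agents; the same averaging as a standalone helper:

def average_gradients(grad_lists):
    """Element-wise average of per-agent gradient lists."""
    n = float(len(grad_lists))
    avg = list(grad_lists[0])
    for grads in grad_lists[1:]:
        for j, g in enumerate(grads):
            avg[j] = avg[j] + g
    return [g / n for g in avg]

print(average_gradients([[1.0, 2.0], [3.0, 4.0]]))  # [2.0, 3.0]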
Example #51
from google.cloud import bigquery

import configparser
import log

logger = log.getLogger(__name__)

config = configparser.ConfigParser()
config.read('config.txt')

client = bigquery.Client(project=config['project']['id'])

dataset_id = 'kraken'
table_id = 'OHLC'
table_ref = client.dataset(dataset_id).table(table_id)
table = client.get_table(table_ref)


def insertOHLC(krakenOHLC):
    rows_to_insert = krakenOHLC.getCommited()
    errors = client.insert_rows(table, rows_to_insert)

    if (errors != []):
        logger.error(errors)
        return False

    return True
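insert_rows performs a streaming insert and returns a list of per-row errors, empty on success, which is what the guard above tests. A hedged usage sketch with a hypothetical stand-in for the krakenOHLC object:

class _FakeOHLC(object):
    # Hypothetical: rows must match the kraken.OHLC table schema.
    def getCommited(self):
        return [{'time': 1514764800, 'open': 13850.0, 'close': 13480.0}]

if insertOHLC(_FakeOHLC()):
    logger.info('OHLC rows committed to BigQuery')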
Example #52
#try:
#    from elijah.provisioning.server import NetworkUtil
#    from elijah.provisioning.configuration import Const
#    from elijah.provisioning.package import VMOverlayPackage
#    from elijah.provisioning.synthesis_protocol import Protocol
#except ImportError as e:
#    sys.stderr.write("%s\n" % str(e))
#    sys.exit(1)
from server import NetworkUtil
from configuration import Const
from configuration import VMOverlayCreationMode
from synthesis_protocol import Protocol
import process_manager
import log as logging

LOG = logging.getLogger(__name__)


ACK_DATA_SIZE = 100*1024


class StreamSynthesisClientError(Exception):
    pass

class NetworkMeasurementThread(threading.Thread):
    def __init__(self, sock, blob_sent_time_dict, monitor_network_bw, vm_resume_time_at_dest):
        self.sock = sock
        self.blob_sent_time_dict = blob_sent_time_dict

        # shared memory
        self.monitor_network_bw = monitor_network_bw
Example #53
 def setUpClass(cls):
     """
     Perform class setup before running the testcase
     Remove shared memory files, start vpp and connect the vpp-api
     """
     gc.collect()  # run garbage collection first
     random.seed()
     cls.logger = getLogger(cls.__name__)
     cls.tempdir = tempfile.mkdtemp(prefix='vpp-unittest-%s-' %
                                    cls.__name__)
     cls.file_handler = FileHandler("%s/log.txt" % cls.tempdir)
     cls.file_handler.setFormatter(
         Formatter(fmt='%(asctime)s,%(msecs)03d %(message)s',
                   datefmt="%H:%M:%S"))
     cls.file_handler.setLevel(DEBUG)
     cls.logger.addHandler(cls.file_handler)
     cls.shm_prefix = cls.tempdir.split("/")[-1]
     os.chdir(cls.tempdir)
     cls.logger.info("Temporary dir is %s, shm prefix is %s", cls.tempdir,
                     cls.shm_prefix)
     cls.setUpConstants()
     cls.reset_packet_infos()
     cls._captures = []
     cls._zombie_captures = []
     cls.verbose = 0
     cls.vpp_dead = False
     cls.registry = VppObjectRegistry()
     cls.vpp_startup_failed = False
     cls.reporter = KeepAliveReporter()
     # need to catch exceptions here because if we raise, then the cleanup
     # doesn't get called and we might end with a zombie vpp
     try:
         cls.run_vpp()
         cls.reporter.send_keep_alive(cls)
         cls.vpp_stdout_deque = deque()
         cls.vpp_stderr_deque = deque()
         cls.pump_thread_stop_flag = Event()
         cls.pump_thread_wakeup_pipe = os.pipe()
         cls.pump_thread = Thread(target=pump_output, args=(cls, ))
         cls.pump_thread.daemon = True
         cls.pump_thread.start()
         cls.vapi = VppPapiProvider(cls.shm_prefix, cls.shm_prefix, cls)
         if cls.step:
             hook = StepHook(cls)
         else:
             hook = PollHook(cls)
         cls.vapi.register_hook(hook)
         cls.sleep(0.1, "after vpp startup, before initial poll")
         try:
             hook.poll_vpp()
         except VppDiedError:
             cls.vpp_startup_failed = True
             cls.logger.critical(
                 "VPP died shortly after startup, check the"
                 " output to standard error for possible cause")
             raise
         try:
             cls.vapi.connect()
         except Exception:
             try:
                 cls.vapi.disconnect()
             except Exception:
                 pass
             if cls.debug_gdbserver:
                 print(
                     colorize(
                         "You're running VPP inside gdbserver but "
                         "VPP-API connection failed, did you forget "
                         "to 'continue' VPP from within gdb?", RED))
             raise
     except Exception:
         try:
             cls.quit()
         except Exception:
             pass
         raise