Example #1
    def __init__(self,
                 queue_name,
                 route,
                 persister,
                 wnum,
                 port,
                 logfile=sys.stdout):
        self.queue_name = queue_name
        self.route = route
        self.persister = persister
        self.stop = False
        self.pid = os.getpid()
        self.worker_id = '-'.join(
            ['worker', str(wnum), queue_name, route,
             str(self.pid)])
        self.log = Logger(self.worker_id,
                          logfile=logfile,
                          loglevel=logging.DEBUG)
        self.log.info("starting")

        self.host = socket.gethostbyname(socket.gethostname())
        self.port = port
        self.register()
        self.todo = None
        self.stop = False
Example #2
def open_script_config(productModel):

    dict_script = {
        "default": "default",  #没有适配的手机
        "Xiaomi": "default",
        "samsung": "default",
        "360": "default",
        "Lenovo": "default",
        "vivo": "default",
        "HUAWEI": "default",
        "OPPO": "default",
        "gionee": "default",
        "xlj": "default",
        "yunso": "default",
        "oysin": "default"
    }

    try:
        if productModel in dict_script:
            return dict_script[productModel]
        else:
            return dict_script['default']
    except Exception:
        log = Logger()
        log.error("model_config.py, open_script_config : get script error")
        return dict_script['default']
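A quick usage sketch (hypothetical, not from the original project): every model in the table above currently maps to "default", and unknown models fall through to the 'default' entry.

print(open_script_config("Xiaomi"))        # -> "default"
print(open_script_config("UnknownBrand"))  # -> "default" (fallback entry)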
Example #3
def main(args):
    loader = Loader(args.run_name)
    logger = Logger(args.run_name, create_if_exists=False)
    option = loader.load_option()
    layers = [int(l) for l in option["layers"].split(",")]
    params = loader.load_params()

    Model = get_model_cls_by_type(option["type"])
    model = Model(layers, option["nc"], option["omega"])
    model.update_net_params(params)

    if args.size == 0:
        orig_img_fn = loader.get_image_filename("original")
        img = Image.open(orig_img_fn)
        width = img.width
        height = img.height
    else:
        width = args.size
        height = args.size

    estimate_and_save_image(model, width, height, logger)
    if option["nc"] == 1:
        estimate_and_save_gradient(model, width, height, logger)
        estimate_and_save_laplacian(model, width, height, logger)

    if option["size"] != 0:
        # PIL resize as reference
        orig_pil_img = loader.load_pil_image("original")
        resized_pil = orig_pil_img.resize((width, height))
        pil_output_name = "pil_{}x{}".format(width, height)
        logger.save_image(pil_output_name, resized_pil)
Example #4
 def __init__(self, code: CodeBlock):
     self.code = BlockStmt(code)
     self.log = Logger("Algo")
     self.strict_typing = False
     self.callback_stop = lambda: ()
     self.callback_input = None
     self.callback_print = None
     self.map = {
         DisplayStmt: self.exec_display,
         InputStmt: self.exec_input,
         AssignStmt: self.exec_assign,
         IfStmt: self.exec_if,
         ForStmt: self.exec_for,
         WhileStmt: self.exec_while,
         BreakStmt: self.exec_break,
         ContinueStmt: self.exec_continue,
         FuncStmt: self.exec_function,
         ReturnStmt: self.exec_return,
         CallStmt: self.exec_call,
         ElseStmt: self.exec_else,
         BaseStmt: lambda _: (),
         CommentStmt: lambda _: (),
         StopStmt: self.exec_stop,
         SleepStmt: self.exec_sleep
     }
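The snippet above only builds the handler map; the dispatcher that consumes it is not shown. A minimal sketch of how such a map is typically used, assuming a method name exec_stmt that is not part of the original code:

    def exec_stmt(self, stmt):
        # Hypothetical dispatcher: look up the handler registered for this
        # statement type and run it; unregistered types are silently skipped.
        handler = self.map.get(type(stmt), lambda _: ())
        return handler(stmt)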
Example #5
    def __init__(self, expr: str):
        """Initializes the Parser instance.

        expr -- the expression to be parsed"""
        self.expression = expr
        self.tokens = []
        self.index = 0
        self.log = Logger("Parser")
Example #6
def main(args):
    layers = [int(l) for l in args.layers.split(",")]

    Model = get_model_cls_by_type(args.type)
    DataLoader = get_data_loader_cls_by_type(args.type)

    data_loader = DataLoader(args.file, args.nc, args.size, args.batch_size)
    model = Model(layers, args.nc, args.omega)
    optimizer = JaxOptimizer("adam", model, args.lr)

    name = args.file.split(".")[0]
    logger = Logger(name)
    logger.save_option(vars(args))

    gt_img = data_loader.get_ground_truth_image()
    logger.save_image("original", data_loader.original_pil_img)
    logger.save_image("gt", gt_img)

    iter_timer = Timer()
    iter_timer.start()

    def interm_callback(i, data, params):
        log = {}
        loss = model.loss_func(params, data)
        log["loss"] = float(loss)
        log["iter"] = i
        log["duration_per_iter"] = iter_timer.get_dt() / args.print_iter

        logger.save_log(log)
        print(log)

    print("Training Start")
    print(vars(args))

    total_timer = Timer()
    total_timer.start()
    last_data = None
    for _ in range(args.epoch):
        data_loader = DataLoader(args.file, args.nc, args.size,
                                 args.batch_size)
        for data in data_loader:
            optimizer.step(data)
            last_data = data
            if optimizer.iter_cnt % args.print_iter == 0:
                interm_callback(optimizer.iter_cnt, data,
                                optimizer.get_optimized_params())

    if optimizer.iter_cnt % args.print_iter != 0:
        interm_callback(optimizer.iter_cnt, last_data,
                        optimizer.get_optimized_params())

    train_duration = total_timer.get_dt()
    print("Training Duration: {} sec".format(train_duration))
    logger.save_net_params(optimizer.get_optimized_params())
    logger.save_losses_plot()
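The loop above relies on a Timer helper exposing only start() and get_dt(); its implementation is not shown. A minimal stand-in consistent with the "duration_per_iter" computation, assuming get_dt() returns the seconds elapsed since the previous start()/get_dt() call (an assumption, not the project's actual class):

import time

class Timer:
    # Hypothetical stand-in for the Timer used above.
    def __init__(self):
        self._last = None

    def start(self):
        # record a reference point
        self._last = time.time()

    def get_dt(self):
        # seconds since the previous start()/get_dt() call
        now = time.time()
        dt = now - self._last
        self._last = now
        return dt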
Example #7
    def util_load(self, file):
        file_name = os.path.basename(file)
        file_name = file_name.replace(".pyc", "_case").replace(".py", "_case")
        name_info = file_name.split("_", 1)

        self.case_id = name_info[0]
        self.case_name = name_info[1]
        self.logger = Logger(self.case_id)
        self.case_info = CaseInfo(self.case_id, self.case_name,
                                  "Functional testing")
        self.case_info.set_log_path(self.logger.log_file_rel_report)
Example #8
    def test_device_connect(self):
        try:
            outInfo = subprocess.Popen('adb get-serialno',
                                       shell=True,
                                       stdout=subprocess.PIPE)
            out, err = outInfo.communicate()

            if out:
                return True  # connection successful
            else:
                log = Logger()
                log.error(
                    "device_handle.py, test_device_connect : no devices found")
                exit(1)

        except Exception:
            # print(e)  # a proper log entry should be written here
            exit(1)
Example #9
    def get_uidump(self):
        '''
        Dump the UI hierarchy (control tree) of the current Activity.
        '''
        temp_parent_path = os.getcwd()
        xml_path_parent = temp_parent_path + '\\temp'
        if not os.path.exists(xml_path_parent):
            os.makedirs(xml_path_parent)
        xml_path = xml_path_parent + '\\uidump.xml'

        #time_start = time.clock()
        try:
            run_command = "adb pull /data/local/tmp/uidump.xml {path}".format(
                path=xml_path)
            os.popen("adb shell uiautomator dump /data/local/tmp/uidump.xml")
            os.popen(run_command)
        except Exception:
            log = Logger()
            log.error("install_app_by_sax.py, get_uidump : xml can not pull")
            exit(1)
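Note that os.popen() does not raise when the adb command fails, so the except branch above is unlikely to ever trigger. A hedged alternative sketch using subprocess.run(check=True), which raises CalledProcessError on a non-zero adb exit code (xml_path as built in the snippet above):

import subprocess

# surface adb failures instead of silently ignoring them
subprocess.run("adb shell uiautomator dump /data/local/tmp/uidump.xml",
               shell=True, check=True)
subprocess.run("adb pull /data/local/tmp/uidump.xml {path}".format(path=xml_path),
               shell=True, check=True)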
Example #10
    def __init__(self, strict=False):
        self.frames = [{}]

        for name, item in mlib.__dict__.items():
            if isinstance(item, types.ModuleType):
                for member_name, member in item.__dict__.items():
                    if callable(member):  # if function
                        doc_func = mlib.find_function(member_name)

                        if doc_func:
                            member.doc_spec = doc_func

                        self.frames[0][member_name] = member
                    elif member_name.startswith("c_"):  # if constant
                        self.frames[0][member_name[2:]] = member

        for alias, func in mlib.docs.ext_aliases:
            self.frames[0][alias] = self.frames[0][func]

        self.log = Logger("Eval")
        self.strict_typing = strict
Example #11
def open_model_config(deviceModel):

    dict_deviceModel = {
        "default": "default",
        "OPPO_R9_Plusm_A": "OPPO",

        "vivo_Y67A": "vivo",

        "MI_5": "Xiaomi",
        "Redmi_Note_4": "Xiaomi",
        "2014501": "Xiaomi",
        "Redmi_Note_3": "Xiaomi",
        "MI_2": "Xiaomi",  # fails to open after installation

        "SM-G9300": "samsung",

        "1503-A01": "360",

        "Lenovo_K30-TM": "Lenovo",
        "Lenovo_A828t": "Lenovo",

        "F103": "gionee",

        "HLJ6": "xlj",
        "LA-S6": "xlj",

        "D9": "yunso",

        "OYSIN_X8": "oysin"

    }
    try:
        if deviceModel in dict_deviceModel:
            return dict_deviceModel[deviceModel]
        else:
            return dict_deviceModel['default']
    except Exception:
        log = Logger()
        log.error("model_config.py, open_model_config : get model error")
        return dict_deviceModel['default']
Example #12
        'l_rate': 1e-8,
        'lr_gan': 0.00002,
        'lr_refine': 1e-6,
        'beta1': 0.5,
        'data_path': 'datasets',
        'n_epoch': 1000,
        'batch_size': 10,
        'num_workers': 10,
        'print_freq': 10,
        'device_ids': [1],
        'domainA': 'Lip',
        'domainB': 'Indoor',
        'weigths_pool': 'pretrain_models',
        'pretrain_model': 'deeplab.pth',
        'fineSizeH': 241,
        'fineSizeW': 121,
        'input_nc': 3,
        'name': 'v3_5_t->s_Refine',
        'checkpoints_dir': 'checkpoints',
        'net_D': 'NoBNSinglePathdilationMultOutputNet',
        'use_lsgan': True,
        'resume': None,  # 'checkpoints/v3_1/'
    }

    logger = Logger(
        log_file='./log/' + args['name'] + '-' + time.strftime("%Y-%m-%d_%H:%M:%S", time.localtime()) + '.log')
    logger.info('------------ Options -------------\n')
    for k, v in args.items():
        logger.info('%s: %s' % (str(k), str(v)))
    logger.info('-------------- End ----------------\n')
    main()
Example #13
from util.log import Logger

device = DeviceHandle()

# check whether the phone connected successfully
is_connect = device.test_device_connect()

debug = False  # debug mode

# phone connected successfully
if is_connect:
    # run in debug mode
    if debug:
        info = android_info()  # save the phone info to android_info.txt
        info.run()
        log = Logger()
        log.info("debug is open")

    # get the device model, e.g. OPPO_R9_Plusm_A
    deviceModel = device.getModel()

    # map the device model to its brand, e.g. oppo, xiaomi
    productModel = open_model_config(deviceModel)
    if productModel == '':
        productModel = device.getBrand()  # if the model is not in the table, fall back to the brand reported by the phone

    # get the script module to run for this brand
    json_script = open_script_config(productModel)

    # dynamically import the package to run: \lib\xxx
    package_name = 'script.' + json_script
Example #14
"""
This is an example on how to import an SBML file
create a report for a time course simulation
and run a time course simulation
"""
"""
Contains simulation code using COPASI python bindings
"""

import os
import sys
import json
from config import Config
import requests
from COPASI import *
from sim_spec_manager import SimulationSpecManager
from util.log import Logger
logger = Logger(push_to_crbmapi=True)

# create a datamodel
try:
    dataModel = CRootContainer.addDatamodel()
except Exception:
    dataModel = CRootContainer.getUndefinedFunction()

sim_spec_manager = SimulationSpecManager()
if not sim_spec_manager.parse_status:
    logger.error("Error encountered while parsing omex")
    sys.exit()


def main():
    # the only argument to the main routine should be the name of an SBML file
Example #15
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    model_path = args.model_path
    checkpoint = args.checkpoint
    model_type = args.model_type
    batch_size = args.batch_size
    num_workers = args.num_workers
    sample_method = args.sample
    seq_len = args.target_frames
    gap = args.gap
    print('model_path: ', model_path + '/' + checkpoint)
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    if args.logger_path is None:
        sys.stdout = Logger(model_path + '/log_test.txt')
    else:
        sys.stdout = Logger(args.logger_path)

    data_root = args.data_root
    data_type = args.data_name
    val_dir = os.path.join(data_root, data_type, 'val')

    val_dataset = rwf2000_dataset.RWFDataset(directory=val_dir,
                                             data_augmentation=False,
                                             target_frames=seq_len,
                                             sample=sample_method,
                                             gap=gap)
    val_loader = torch.utils.data.DataLoader(dataset=val_dataset,
                                             batch_size=batch_size,
                                             shuffle=False,
Example #16
import traceback
import logging

from action.PageAction import *
from config.VarConfig import (testCase_testIsExecute, testCase_testStepName,
                              testStep_testNum, testStep_testStepDescribe,
                              testStep_keyWord, testStep_elementBy,
                              testStep_elementLocator, testStep_operateValue,
                              testCase_testResult, smtp_server, port, sender,
                              psw, receiver, excelPath)
from util.log import Logger
from util.ParseExcel import ParseExcel

log = Logger(__name__, CmdLevel=logging.INFO, FileLevel=logging.INFO)
p = ParseExcel()
sheetName = p.wb.sheetnames  # get all sheet names from the Excel workbook


def test_126_mail_send_with_att():
    try:
        test_case_pass_num = 0
        required_case = 0
        is_execute_column_values = p.get_column_value(
            sheetName[0], testCase_testIsExecute)  # whether each case row should be executed
        print(is_execute_column_values)
        # print(columnValues)
        for index, value in enumerate(is_execute_column_values):
            print(index, value)
            # get the name of the corresponding step sheet
            step_sheet_name = p.get_cell_of_value(sheetName[0], index + 2,
                                                  testCase_testStepName)
Example #17
    def __init__(self):

        self.log = Logger()
Example #18
    def real_time_recognize(
        self,
        width=640,
        height=360,
        resize=1.0,
        detector="mtcnn",
        flip=False,
        graphics=True,
        socket=None,
        mtcnn_stride=1,
    ):
        """Real-time facial recognition
        :param width: width of frame (default: 640)
        :param height: height of frame (default: 360)
        :param resize: resize scale (default: 1. = no resize)
        :param detector: face detector type (default: "mtcnn")
        :param flip: whether to flip horizontally or not (default: False)
        :param graphics: whether or not to use graphics (default: True)
        :param socket: socket (dev) (default: None)
        :param mtcnn_stride: frame stride for MTCNN detection (default: 1)
        """

        assert self._db, "data must be provided"
        assert 0.0 <= resize <= 1.0, "resize must be in [0., 1.]"

        graphics_controller = GraphicsRenderer(width, height, resize)
        logger = Logger(frame_limit=10, frame_threshold=5)
        pbar = ProgressBar(logger, ws=socket)
        cap = Camera()
        detector = FaceDetector(detector,
                                self.img_shape,
                                min_face_size=240,
                                stride=mtcnn_stride)

        while True:
            _, frame = cap.read()
            cframe = frame.copy()

            # resize frame
            if resize != 1:
                frame = cv2.resize(frame, (0, 0), fx=resize, fy=resize)

            # facial detection and recognition
            info = self.recognize(frame, detector, flip=flip)
            face, is_recognized, best_match, elapsed = info

            # logging and socket
            if is_recognized and is_looking(face):
                log_result = logger.log(best_match)
                pbar.update(end=log_result is not None)
                if log_result and socket:
                    socket.send(json.dumps({"best_match": best_match}))

            # graphics
            if graphics:
                graphics_controller.add_graphics(cframe, *info)
                cv2.imshow("AI Security v2021.0.1", cframe)
                if cv2.waitKey(1) & 0xFF == ord("q"):
                    break

        cap.release()
        cv2.destroyAllWindows()
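Based on the docstring, a call could look like the sketch below; `recognizer` is a placeholder for an instance of the surrounding class with its face database (_db) already loaded, and the argument values are illustrative only.

recognizer.real_time_recognize(width=640, height=360, resize=0.5,
                               detector="mtcnn", graphics=True)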
Example #19
    chrome_options = webdriver.ChromeOptions()
    chrome_options.add_argument('--start-maximized')  # maximize the browser window
    chrome_options.add_argument('--disable-infobars')  # hide the "Chrome is being controlled by automated software" infobar
    prefs = {'download.default_directory': cf.get_value('download_path')}
    chrome_options.add_experimental_option('prefs', prefs)  # set the default download directory
    # chrome_options.add_argument(r'--user-data-dir=D:\ChromeUserData')  # reuse a user data dir to skip login
    driver = webdriver.Chrome('{}\\driver\\chromedriver.exe'.format(
        cf.get_value('root_path')),
                              options=chrome_options)
    cf.set_value('driver', driver)


def main():
    """运行pytest命令启动测试"""
    pytest.main([
        '-v', '-s', 'test_case/', '--html=report/report.html',
        '--self-contained-html'
    ])


if __name__ == '__main__':
    cf.init()  # initialize global variables
    get_args()  # parse command-line arguments
    log = Logger('szh')  # initialize logging
    set_driver()  # initialize the webdriver
    main()  # run the pytest suite
    cf.get_value('driver').quit()  # quit the selenium driver

    # fill in the correct username and password in send_mail() of util.mail before enabling email sending!
    # send_mail(['*****@*****.**'])  # email the report
Example #20
    def __init__(self,
                 queue_name,
                 routing_keys=None,
                 backend='mongodb',
                 conn_url='localhost:27017',
                 dbname='fwbots',
                 logfile=sys.stdout,
                 pidfile=None):
        """
        routing_keys are a required parameter to specify an n-length list
        of routing keys, which will each be assigned to one worker

        FWBOTS: we are using routing_keys as the account names to load
        """
        self.stop = False
        self.name = queue_name
        self.log = Logger('pool-' + queue_name, logfile=logfile)
        self.persister = get_backend(backend)(conn_url, dbname)

        self.port = -1  # too lazy to actually remove this

        self.workers = {}
        self.manual = {}
        self.auto = {}
        self.insta = {}

        if pidfile:
            self.log.info("writing to pidfile %s" % pidfile)
            with open(pidfile, 'w') as f:
                f.write(str(os.getpid()))

        # TODO this needs to be in shared memory
        wnums = {}

        self.ircbot = IrcListener(self, "#fwbots", self.name,
                                  "irc.freenode.net")

        for key in routing_keys:
            errnum = 0
            try:
                acc = get_twitter_account(key)
                if acc.ty == 'auto':
                    self.log.info('found auto for %s' % key)
                    self.auto[acc.name] = acc
                # if there are multiple manual accs defined, pick only the last one
                elif acc.ty == 'manual':
                    self.log.info('found manual for %s' % key)
                    self.manual[acc.name] = acc
            except NameError:
                errnum += 1
            try:
                acc = get_instagram_account(key)
                self.insta[acc.name] = acc
            except NameError:
                errnum += 1
            if errnum > 1:
                self.log.warn("Could not find any account called %s" % key)

            if key not in wnums:
                wnums[key] = 0
            wnums[key] += 1
            worker = Worker(queue_name, key, self.persister, wnums[key],
                            self.port, logfile)

            thread = KillableThread(target=worker.start_worker)
            thread.start()
            self.workers[worker.worker_id] = (worker, thread)
        # lastly, register yourself
        self.persister.add_pool(self)
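A hypothetical instantiation following the docstring; the Pool class name, queue name, account names, and connection settings below are placeholders, not values from the original project.

pool = Pool('fwbots-queue',
            routing_keys=['account_one', 'account_two'],
            backend='mongodb',
            conn_url='localhost:27017',
            dbname='fwbots')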
Example #21
    target_frames = args.target_frames
    frame_interval = args.frame_interval
    sample = args.sample
    use_thread = args.use_thread
    gap = args.gap

    socket_address = args.socket_address
    host, port = socket_address.split(':')
    port = int(port)
    udp_socket = UDPSocket(host=host, port=port)

    print('model_path: ', model_path + '/' + checkpoint)
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    if args.logger_path is None:
        sys.stdout = Logger(model_path + '/log_realtime.txt')
    else:
        sys.stdout = Logger(args.logger_path)

    if model_type == 'rgbonly':
        model = rwf2000_baseline_rgbonly.RWF_RGB()
    elif model_type == 'flowgate':
        model = rwf2000_baseline_flowgate.RWF_FlowGate()

    model = torch.nn.DataParallel(model).cuda()
    model.load_state_dict(torch.load(os.path.join(model_path, checkpoint)))
    model.eval()

    # Load video
    source_url = "rtsp://*****:*****@192.168.1.65:554/Streaming/Channels/1"
    cam_ip = source_url[source_url.find("@") +
Example #22
 def __init__(self, logpath):
     self.logger = Logger(logpath)
     self.logpipeout = LogPipe.createAndStart('OUT', self.logger)
     self.logpipeerr = LogPipe.createAndStart('ERROR', self.logger)
Example #23
"""
-------------------------------------------------
   File Name:save_dbpedia2db
   Author:jasonhaven
   date:2018/4/19
-------------------------------------------------
   Change Activity:2018/4/19:
-------------------------------------------------
"""
import re
import sys
import datetime
from pymongo import MongoClient
from util.log import Logger
from util.io import IOHelper

logger = Logger().get_logger()

if __name__ == '__main__':
    input_triples = "result/triples.txt"

    client = MongoClient('127.0.0.1', 27017)
    db = client.relation_extraction  # connect to the database (created automatically if it does not exist)
    triples = db.distant_supervised  # use the collection (created automatically if it does not exist)

    triples_sents = IOHelper.read_lines(input_triples)
    triples_sents = list(set(triples_sents))

    if triples_sents is None:
        logger.error('read failed!')
        sys.exit(0)
Example #24
                        help='Set encoder to compute forward and backward'
                        ' hidden states.')

    args = parser.parse_args()

    if validate_args(args):

        lang, datasets = io.load(args.data)
        vocab = {
            'src': Vocab(lang['vocab']['src']),
            'tgt': Vocab(lang['vocab']['tgt']),
            'stack': Vocab(lang['vocab']['stack']),
            'operator': Vocab(lang['vocab']['operator'])
        }

        log = Logger(out_path=args.out)
        line = log.add_text('')
        log.start()

        logger = {
            'log': log,
            'line': line
        }

        settings = model_settings(vocab, args)
        model = build_model(vocab, settings)

        env = {
            'model': model,
            'lang': lang
        }
Example #25
    val_dir = os.path.join(data_root, data_type, 'val')
    pre_cut = args.pre_cut
    pre_cut_len = args.pre_cut_len
    sample_method = args.sample
    seq_len = args.target_frames
    gap = args.gap

    save_path += '{}_{}_e{}_s{}_b{}_lr{}_g{}_{}{}'.format(
        model_type, flow_type, num_epochs, step_size, batch_size, base_lr,
        gamma, sample_method, seq_len)
    if args.other_mark is not None:
        save_path += args.other_mark
    print('model save path', save_path)
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    if args.logger_path is None:
        sys.stdout = Logger(save_path + '/log_train.txt')
    else:
        sys.stdout = Logger(args.logger_path)

    train_dataset = rwf2000_dataset.RWFDataset(directory=train_dir,
                                               data_augmentation=True,
                                               target_frames=seq_len,
                                               sample=sample_method,
                                               gap=gap,
                                               pre_cut=pre_cut,
                                               pre_cut_len=pre_cut_len)
    val_dataset = rwf2000_dataset.RWFDataset(directory=val_dir,
                                             data_augmentation=False,
                                             target_frames=seq_len,
                                             sample=sample_method,
                                             gap=gap,
Example #26
    original = getattr(klass, method_name)

    def inner(callback):
        def wrapped(*args, **kwargs):
            return callback(original, *args, **kwargs)

        actual = getattr(original, '__wrapped__', original)
        wrapped.__wrapped__ = actual
        wrapped.__doc__ = getattr(actual, '__doc__', None)
        wrapped.__name__ = actual.__name__
        setattr(klass, method_name, wrapped)
        return wrapped

    return inner


from util.log import Logger
_log = Logger('cache')


@replace_method(BaseDatabaseWrapper, 'cursor')
def cursor(original, self):
    result = original(self)
    return CursorWrapper(result, self, _log)


class CacheMiddleware(object):
    pass

    #self.cursorclass = kwargs2.pop('cursorclass', self.default_cursor)
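A toy usage sketch of the decorator factory above (assuming, as the cursor example suggests, that it is named replace_method): the replacement function receives the original method as its first argument and can delegate to it.

class Greeter:
    def greet(self, name):
        return "hello " + name


@replace_method(Greeter, 'greet')
def greet(original, self, name):
    # delegate to the original method, then post-process its result
    return original(self, name).upper()


print(Greeter().greet("world"))  # -> "HELLO WORLD"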