Exemplo n.º 1
0
    def load(self, dbpath, name):
        '''
            Load an existing table from *dbpath*/*name*.

        Reads the table description, then creates or migrates the backing
        data file so its header matches that description.

        :param dbpath: base directory of the database
        :param name: table name (also its directory name under dbpath)
        :return: self
        '''
        try:
            #initialize table parameters
            self.name = name

            self.path = join_paths(dbpath, name)
            self.table_file = join_paths(self.path, "table")
            self.data_file = join_paths(self.path, "data")

            self.table = self.desc()

            #load data file
            if not is_file(self.data_file):
                #create data file if not exists
                self._create_data_file()
            else:
                #replace old data file if needed
                with open(self.data_file) as fdata:
                    # strip the trailing newline before splitting (same read
                    # pattern as select()); without it the last field name
                    # carries '\n' so the comparison below always mismatches
                    # and the data file gets needlessly rebuilt
                    nfields = strips(fdata.readline().strip().split(","))
                    if self.table.nfields() != nfields:
                        if is_subset(nfields, self.table.nfields()):
                            self._upgrade_data_file()
                        else:
                            self._replace_data_file()

            Logger.info("loading table %s...success.", self.name)
            return self
        except Exception as e:
            Logger.info("loading table %s...failed. error: %s", self.name, str(e))
            raise e
Exemplo n.º 2
0
    def handle(self):
        """Issue an image captcha bound to the caller's IP and usage type.

        Expects a 'type' argument selecting the captcha scene; stores the
        generated code in Redis for 60 seconds under an IP-derived sendId.
        """
        parser = reqparse.RequestParser()
        parser.add_argument('type')
        args = parser.parse_args()

        if args.type is None:
            return CommonUtil.json_response(-1, '缺少参数:type')

        # map the request type onto its captcha usage scene
        usages = {
            '0': 'regAccount',    # account registration
            '1': 'findPassword',  # password recovery
            '2': 'adminLogin',    # admin console login
        }
        usage = usages.get(args.type)
        if usage is None:
            return CommonUtil.json_response(-1, 'type参数格式错误')

        # the client IP is used as the sendId so that frequent requests
        # overwrite the previous captcha stored under the same key
        digest = hashlib.md5()
        digest.update("validimage_{}_{}".format(request.environ['REMOTE_ADDR'],
                                                usage).encode('utf-8'))
        sendId = digest.hexdigest()
        validImage = ValidImage.create()

        Redis.setex(sendId, 60, validImage['code'])

        data = {"img": validImage['img'], "sendId": sendId}

        Logger.log("生成图片验证码 ip:{} sendId:{} code:{}".format(
            request.environ['REMOTE_ADDR'], sendId, validImage['code']))

        return CommonUtil.json_response(0, "success", data)
Exemplo n.º 3
0
def output_json(data, code, headers=None):
    """Build a Flask response whose body is *data* JSON-encoded.

    :param data: payload to serialize
    :param code: HTTP status code
    :param headers: optional extra headers merged into the response
    """
    # if this is an app interface and not a payment callback, encrypt before returning
    Logger.log("请求id:%s 响应\n返回JSON:%s\n" % (session['requestId'], data))
    response = make_response(json.dumps(data), code)
    if headers:
        response.headers.extend(headers)
    return response
Exemplo n.º 4
0
    def open(self, host, user, pwd, dbn, port=3306):
        '''
            Open the database, creating it first if it does not exist.

        :param host: MySQL server host
        :param user: user name
        :param pwd: password
        :param dbn: database name
        :param port: server port, default 3306
        :return: self
        '''
        #init store path
        try:
            self.host, self.port, self.user, self.pwd = host, port, user, pwd
            self.dbn = dbn
            self.dbc = pymysql.connect(host=host, user=user, passwd=pwd, port=port)

            if not self._exists():
                #create database
                self._create()
                self._use()
            else:
                # load database
                self._use()
                self._load()

            self._rebuild_tindex()

            # log before returning: the original logged after `return`,
            # making the success message unreachable
            Logger.info("open store mysql://%s:%s@%s:%d/%s...success. %d tables.", user, pwd, host, port, self.dbn, len(self.tables))
            return self
        except Exception as e:
            Logger.error("open store mysql://%s:%s@%s:%d/%s...failed. error: %s", user, pwd, host, port, self.dbn, str(e))
            raise e
Exemplo n.º 5
0
    def test_print_result(self):
        """Run each configured segmentation algorithm over the first frames of
        the fake image dataset and write its results to disk."""
        device = FakeDevice(SourceType.IMAGE, 'dataset/png/')
        # device = FakeDevice(SourceType.IMAGE, '/media/zis/Dados/dev/datasets/putkk.poznan/Dataset_1_Kin_2')

        algorithms = [
            RGBDSegmentation(
                Parameter(Segmentation.GRAPH_CANNY,
                          os.getcwd() + '/results/graph_canny/',
                          resize=(640, 360),
                          fix_proportion=False)),
        ]
        #algorithms.append(RGBDSegmentation(Parameter(Segmentation.RGBD_SALIENCY, os.getcwd() + '/results/rgbd_saliency/', resize=(324, 324))))
        #algorithms.append(RGBDSegmentation(Parameter(Segmentation.FCN_TENSORFLOW, os.getcwd() + '/results/fcn_tensorflow/', resize=(224, 224))))
        #algorithms.append(RGBDSegmentation(Parameter(Segmentation.FUSENET, os.getcwd() + '/results/fusenet/', resize=(224, 224))))

        directory_rgb = device.datasetPath + 'rgb/'
        directory_depth = device.datasetPath + 'depth/'

        for algorithm in algorithms:
            Logger.info('############# Testing algorithm ' +
                        str(algorithm.parameter.segmentation))
            # process the first ten frames of the dataset
            for index in range(10):
                suffix = format(index, '05') + '.png'
                frame = RGBDFrame(
                    RGBFrame(directory_rgb, 'rgb_' + suffix),
                    DepthFrame(directory_depth, 'depth_' + suffix))

                algorithm.process(frame)
                algorithm.write_results()
                algorithm.finish()
                #break

            algorithm.release()
            # only the first algorithm is exercised
            break
Exemplo n.º 6
0
    def select(self):
        '''
            Read every record from the table's data file.

        :return: list of dicts, one per record, keyed by field name
        '''
        try:
            with Lock(self.lock):
                with open(self.data_file, "r") as fdata:
                    # first line of the file holds the field names
                    names = strips(fdata.readline().strip().split(","))

                    # every following line is one record
                    records = []
                    for line in fdata:
                        values = strips(line.strip().split(","))
                        record = {}
                        for pos, field in enumerate(names):
                            record[field] = str2obj(values[pos], ',')
                        records.append(record)

                    return records
        except Exception as e:
            Logger.info("select data from table %s...failed. error: %s", self.name, str(e))
            raise e
Exemplo n.º 7
0
def open_script_config(productModel):
    """Return the script-config name for *productModel*.

    Unknown or invalid models fall back to the 'default' entry.

    :param productModel: device manufacturer/model key
    :return: the configured script name (currently 'default' for every model)
    """
    dict_script = {
        "default": "default",  # phones without a dedicated adaptation
        "Xiaomi": "default",
        "samsung": "default",
        "360": "default",
        "Lenovo": "default",
        "vivo": "default",
        "HUAWEI": "default",
        "OPPO": "default",
        "gionee": "default",
        "xlj": "default",
        "yunso": "default",
        "oysin": "default"
    }

    try:
        # dict.get covers both the hit and the miss in a single lookup
        return dict_script.get(productModel, dict_script['default'])
    except Exception:
        # e.g. an unhashable productModel; narrowed from the original bare
        # `except:` which also swallowed KeyboardInterrupt/SystemExit
        log = Logger()
        log.error("model_config.py, open_model_config : get model error")
        return dict_script['default']
Exemplo n.º 8
0
    def create(self, dbc, table):
        '''
            Create *table* through database connection *dbc*, upgrading or
            replacing a differing existing table of the same name.

        :param dbc: open database connection
        :param table: table definition to create
        :return: self
        '''
        try:
            #initialize table parameters
            self.dbc = dbc
            self.table = table
            self.name = table.name

            #check if table has exists
            if self._exists_table():
                #exists table
                old_table = self.desc()
                if old_table != self.table:
                    if is_subset(old_table.nfields(), self.table.nfields()):
                        #upgrade table
                        self._upgrade_table()
                    else:
                        #replace table: the original referenced the method
                        #without calling it, so incompatible tables were
                        #silently left untouched
                        self._replace_table()
                else:
                    #table is the same as in database
                    pass
            else:
                #create new table
                self._create_table()

            Logger.info("create table %s...success.", self.name)
            return self
        except Exception as e:
            Logger.error("create table %s...failed. error: %s", self.name,
                         str(e))
            raise e
Exemplo n.º 9
0
    def open(self, path):
        '''
            Open the store at *path*, creating it if it does not exist.

        :param path: store directory
        :return: self
        '''
        try:
            with Lock(self.lock):
                #init store path
                self.path = path

                if not path_exists(self.path):
                    #create database
                    self._create()
                else:
                    # load database
                    self._load()

                self._rebuild_tindex()

                # log before returning: the original logged after `return`,
                # making the success message unreachable
                Logger.info("open store %s...success. %d tables.", self.path,
                            len(self.tables))
                return self
        except Exception as e:
            Logger.error("open store %s...failed. error: %s", self.path,
                         str(e))
            raise e
Exemplo n.º 10
0
    def __init__(self,
                 queue_name,
                 route,
                 persister,
                 wnum,
                 port,
                 logfile=sys.stdout):
        """Build the worker's identity and logger, then register it.

        :param queue_name: queue this worker consumes from
        :param route: routing key assigned to this worker
        :param persister: backend used to register/track the worker
        :param wnum: worker number, used to build a unique worker id
        :param port: port the owning pool listens on
        :param logfile: destination stream for log output
        """
        self.queue_name = queue_name
        self.route = route
        self.persister = persister
        self.stop = False
        self.pid = os.getpid()
        # unique id: worker-<num>-<queue>-<route>-<pid>
        self.worker_id = '-'.join(
            ['worker', str(wnum), queue_name, route,
             str(self.pid)])
        self.log = Logger(self.worker_id,
                          logfile=logfile,
                          loglevel=logging.DEBUG)
        self.log.info("starting")

        self.host = socket.gethostbyname(socket.gethostname())
        self.port = port
        self.register()
        self.todo = None
        # (the original re-assigned self.stop = False here; removed as
        # redundant — nothing above sets it to True)
Exemplo n.º 11
0
    def create_table(self, table):
        '''
            Create *table* in the current database, replacing any loaded
            table with the same name.

        :param table: table definition to create
        :return: None
        '''
        with Lock(self.lock):
            # already loaded with an identical definition -> nothing to do
            for loaded in self.tables:
                if loaded.table == table:
                    Logger.info("create table %s...exists.", table.name)
                    return

            # create the new table on disk
            table = FSTable().create(self.path, table)

            # evict a previously loaded table of the same name, if any
            for pos, loaded in enumerate(self.tables):
                if loaded.table.name == table.name:
                    self.tables.pop(pos)
                    break

            self.tables.append(table)

            self._rebuild_tindex()
0
    def select(self):
        '''
            Fetch every row of the table.

        :return: list of dicts keyed by field name
        '''
        try:
            names = self.table.nfields()
            # back-quote the field names for the query
            sql = "select %s from %s;" % (",".join(quotes(names, '`')),
                                          self.name)
            cursor = self.dbc.cursor()
            cursor.execute(sql)

            rows = []
            for row in cursor.fetchall():
                record = {}
                for pos in range(len(row)):
                    value = row[pos]
                    if isinstance(value, str):
                        # stored strings were escaped on insert
                        value = unescapes(value)
                    record[names[pos]] = value
                rows.append(record)
            Logger.info("select from table %s...success", self.name)
            return rows

        except Exception as e:
            Logger.error("select from table %s...failed. error %s", self.name,
                         str(e))
            raise e
Exemplo n.º 13
0
    def get_request_token(self):
        """Log in to Kite via a headless browser and return the request token
        extracted from the post-login redirect URL."""
        Logger.info('Starting to fetch request token for Kite API')
        dispatcher = SeleniumDispatcher(headless=True)
        browser = dispatcher.get_driver()
        browser.get(self.kite.login_url())
        time.sleep(4)
        # credentials form
        browser.find_element_by_xpath("//input[@type='text']").send_keys(self.username)
        password_box = browser.find_element_by_xpath("//input[@type='password']")
        password_box.send_keys(self.password)
        password_box.send_keys(Keys.ENTER)
        time.sleep(2)
        # two-factor PIN form
        pin_box = browser.find_element_by_xpath("//input[@type='password']")
        pin_box.send_keys(self.pin)
        pin_box.send_keys(Keys.ENTER)
        time.sleep(2)
        # the request_token arrives as a query parameter of the redirect URL
        redirect = urlparse.urlparse(browser.current_url)
        token = parse_qs(redirect.query)['request_token'][0]
        Logger.info('Request token received!')
        dispatcher.destroy_driver()
        return token

        

        
        
Exemplo n.º 14
0
    def indian_business_news(self):
        """Fetch all Indian business articles from newsapi.org and store them
        in MongoDB, paging through results when there are more than 100."""
        res = requests.get(self.business_url + '&pageSize=100&apiKey=' +
                           self.api_key).json()
        total_results = res['totalResults']
        self.total_articles = int(total_results)

        # 100 is the max pagesize for newsapi.org
        if total_results <= 100:
            self.insert_data_into_db(res['articles'])
        else:
            # break them in chunks of 10; ceil the *ratio* — the original
            # ceil'd the integer and then divided, producing a float that
            # made range() raise TypeError
            number_of_pages = math.ceil(total_results / 10)
            # range is half-open, so go one past the last page number
            # (the original stopped one page short)
            for page in range(1, number_of_pages + 1):
                res = requests.get(
                    self.business_url + \
                    '&pageSize=10&page={}&apiKey='.format(page) + \
                    self.api_key).json()
                self.insert_data_into_db(res['articles'])
        if self.inserted_articles > 0:
            Logger.info(
                'News data stored successfully!\nTotal articles: {}\nInserted articles: {}'
                .format(self.total_articles, self.inserted_articles))
        else:
            Logger.info('Total articles: {}\nNot inserted articles: {}'.format(
                self.total_articles, self.not_inserted_articles))
Exemplo n.º 15
0
 def __init__(self, code: CodeBlock):
     """Prepare an interpreter for *code*.

     Wraps the block in a BlockStmt, installs default callbacks, and builds
     the statement-class -> handler dispatch table used during execution.
     """
     self.code = BlockStmt(code)
     self.log = Logger("Algo")
     # toggles stricter typing behavior in the exec_* handlers
     # (exact semantics live elsewhere) — TODO confirm
     self.strict_typing = False
     # invoked when execution stops; defaults to a no-op
     self.callback_stop = lambda: ()
     # hooks the host UI installs to supply input and receive printed output
     self.callback_input = None
     self.callback_print = None
     # dispatch table: statement class -> bound handler; BaseStmt and
     # CommentStmt are deliberate no-ops
     self.map = {
         DisplayStmt: self.exec_display,
         InputStmt: self.exec_input,
         AssignStmt: self.exec_assign,
         IfStmt: self.exec_if,
         ForStmt: self.exec_for,
         WhileStmt: self.exec_while,
         BreakStmt: self.exec_break,
         ContinueStmt: self.exec_continue,
         FuncStmt: self.exec_function,
         ReturnStmt: self.exec_return,
         CallStmt: self.exec_call,
         ElseStmt: self.exec_else,
         BaseStmt: lambda _: (),
         CommentStmt: lambda _: (),
         StopStmt: self.exec_stop,
         SleepStmt: self.exec_sleep
     }
Exemplo n.º 16
0
 def process(self, frame):
     """Load *frame*'s point cloud and run the segmentation algorithm on it,
     logging the elapsed time."""
     timer = TimeElapsed()
     self.lastProcessedFrame = frame
     Logger.info('Processing frame - PCL: ' + frame.getFilePath())
     point_cloud = pcl.load_XYZRGB(frame.getFilePath())
     self.algorithmSegmentation.segment_image(point_cloud, True)
     timer.printTimeElapsed()
Exemplo n.º 17
0
def main(args):
    """Reconstruct an image (and, for single-channel models, its gradient and
    Laplacian) from a previously trained model identified by *args.run_name*.

    :param args: parsed CLI arguments; uses args.run_name and args.size
    """
    loader = Loader(args.run_name)
    logger = Logger(args.run_name, create_if_exists=False)
    option = loader.load_option()
    # layer sizes are stored as a comma-separated string in the run options
    layers = [int(l) for l in option["layers"].split(",")]
    params = loader.load_params()

    Model = get_model_cls_by_type(option["type"])
    model = Model(layers, option["nc"], option["omega"])
    model.update_net_params(params)

    if args.size == 0:
        # size 0 means "use the original image's dimensions"
        orig_img_fn = loader.get_image_filename("original")
        img = Image.open(orig_img_fn)
        width = img.width
        height = img.height
    else:
        width = args.size
        height = args.size

    estimate_and_save_image(model, width, height, logger)
    if option["nc"] == 1:
        # gradient/Laplacian estimation only applies to single-channel output
        estimate_and_save_gradient(model, width, height, logger)
        estimate_and_save_laplacian(model, width, height, logger)

    # NOTE(review): this gate checks option["size"] while the sizing branch
    # above checks args.size — possibly one of the two is a typo; confirm
    # which setting should control the PIL reference resize.
    if option["size"] != 0:
        # PIL resize as reference
        orig_pil_img = loader.load_pil_image("original")
        resized_pil = orig_pil_img.resize((width, height))
        pil_output_name = "pil_{}x{}".format(width, height)
        logger.save_image(pil_output_name, resized_pil)
Exemplo n.º 18
0
 def send(self, data):
     """POST *data* to the configured Slack webhook as a 'text' payload.

     :param data: message content; can be dict, str, int, or list
     :return: the requests.Response object
     """
     # Importing it here to avoid error due to circular dependency
     from util.log import Logger
     Logger.info('Sending slack message to: ' + self.webhook_url)
     payload = json.dumps({'text': data})
     return requests.post(self.webhook_url, payload)
Exemplo n.º 19
0
    def __init__(self, expr: str):
        """Create a Parser for the given expression string.

        :param expr: the expression to be parsed
        """
        self.expression = expr
        self.tokens, self.index = [], 0
        self.log = Logger("Parser")
Exemplo n.º 20
0
 def process(self, frame):
     """Segment an RGB-D frame, remembering it as the last processed one and
     logging the object count and elapsed time."""
     timer = TimeElapsed()
     self.lastProcessedFrame = frame
     Logger.info('Processing frame - RGB: ' + frame.rgbFrame.getFilePath() + ', Depth: '+frame.depthFrame.getFilePath())
     rgb_image = self.get_image(frame.rgbFrame)
     depth_image = self.get_image(frame.depthFrame)
     self.results = self.algorithmSegmentation.segment_image(rgb_image, depth_image)
     Logger.info('Objects segmented: ' + str(self.algorithmSegmentation.get_num_objects()))
     timer.printTimeElapsed('Total segmentation - ')
Exemplo n.º 21
0
    def __init__(self):
        """Tag the session with a fresh random request id and log the incoming
        request before normal Resource initialization."""
        digest = hashlib.md5()
        digest.update(os.urandom(24))
        session['requestId'] = digest.hexdigest()

        Logger.log("请求 请求id:%s\n来源IP:%s\n请求方法:%s\n请求路径:%s\n请求参数:%s" %
                   (session['requestId'], request.environ['REMOTE_ADDR'],
                    request.environ['REQUEST_METHOD'], request.url,
                    json.dumps(request.form)))
        Resource.__init__(self)
Exemplo n.º 22
0
class PipelineLog:
    """Owns a Logger plus two LogPipes that capture a pipeline's OUT and
    ERROR streams."""

    def __init__(self, logpath):
        self.logger = Logger(logpath)
        self.logpipeout = LogPipe.createAndStart('OUT', self.logger)
        self.logpipeerr = LogPipe.createAndStart('ERROR', self.logger)

    def close(self):
        """Close both pipes first, then the underlying logger."""
        self.logpipeout.close()
        self.logpipeerr.close()
        self.logger.close()
Exemplo n.º 23
0
 def drop(self):
     '''
         Drop the table by deleting its directory on disk.

     :return: None
     '''
     try:
         remove_dir(self.path)
     except Exception as e:
         Logger.error("drop table %s...failed. error %s", self.name, str(e))
         raise e
Exemplo n.º 24
0
    def write_results(self):
        """Render the segmented objects and write them as an image named after
        the last processed frame's RGB file, logging the time taken."""
        time_elapsed = TimeElapsed()
        img = self.write_objects()
        # build the destination path once instead of concatenating twice
        out_path = self.parameter.outputDir + self.lastProcessedFrame.rgbFrame.fileName
        Logger.info('Saving result to ' + out_path)

        # exist_ok avoids the check-then-create race of the original
        # `if not exists: makedirs` pair
        os.makedirs(self.parameter.outputDir, exist_ok=True)

        cv2.imwrite(out_path, img)
        time_elapsed.printTimeElapsed('Total writing file - ')
Exemplo n.º 25
0
    def util_load(self, file):
        """Derive the case id and name from *file*'s basename and initialize
        the per-case logger and CaseInfo record.

        :param file: path of the test-case source file (.py or .pyc)
        """
        base = os.path.basename(file)
        # foo_bar.py / foo_bar.pyc -> foo_bar_case (.pyc replaced first so
        # the trailing 'c' is not left behind by the .py replacement)
        base = base.replace(".pyc", "_case").replace(".py", "_case")
        parts = base.split("_", 1)

        self.case_id = parts[0]
        self.case_name = parts[1]
        self.logger = Logger(self.case_id)
        self.case_info = CaseInfo(self.case_id, self.case_name,
                                  "Functional testing")
        self.case_info.set_log_path(self.logger.log_file_rel_report)
Exemplo n.º 26
0
 def download_data_all(self):
     """Download data for every (expiry, strike price) pair, building the
     expiry/strike map first if it has not been fetched yet.

     :return: list of file paths that were downloaded
     """
     Logger.info('Download started')
     if self.expiry_strike_price_map is None:
         self.get_expiry_strike_price_map_for_all()
     downloaded_files = [
         self.download_data_specific(expiry, strike_price)
         for expiry in self.expiry_strike_price_map.keys()
         for strike_price in self.expiry_strike_price_map[expiry]
     ]
     Logger.info('Download finished')
     return downloaded_files
Exemplo n.º 27
0
 def desc(self):
     '''
         Describe the table by parsing its on-disk definition file.

     :return: Table parsed from the table file
     '''
     try:
         with open(self.table_file) as ftable:
             return Table().fromstr(ftable.read())
     except Exception as e:
         Logger.info("describe table %s...failed. error: %s", self.name, str(e))
         raise e
Exemplo n.º 28
0
 def drop(self):
     '''
         Drop the table in the database if it exists.

     :return: None
     '''
     try:
         self.dbc.cursor().execute("drop table if exists %s;" % self.name)
         Logger.info("drop table %s...success", self.name)
     except Exception as e:
         Logger.error("drop table %s...failed. error %s", self.name, str(e))
         raise e
Exemplo n.º 29
0
 def truncate(self):
     '''
         Truncate the table: remove its data file and recreate an empty one.

     :return: None
     '''
     try:
         with Lock(self.lock):
             remove_files(self.data_file)
             self._create_data_file()
     except Exception as e:
         Logger.error("truncate table %s...failed. error %s", self.name, str(e))
         raise e
Exemplo n.º 30
0
    def __init__(self):
        """Assign a random request id to the session and log the raw incoming
        request before delegating to Resource.__init__."""
        digest = hashlib.md5()
        digest.update(os.urandom(24))
        session['requestId'] = digest.hexdigest()

        Logger.log(
            ">>>>>>>>>>>>>>>>>>>>>>> 请求 请求id:%s >>>>>>>>>>>>>>>>>>>>>>>\n%s|%s|%s|%s|%s"
            %
            (session['requestId'],
             time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
             request.environ['REMOTE_ADDR'], request.environ['REQUEST_METHOD'],
             request.url, request.get_data()))
        Resource.__init__(self)
Exemplo n.º 31
0
class Worker(object):
    """A queue worker bound to one routing key; it registers itself with the
    persister on construction and yields job results from start_worker."""

    def __init__(self, queue_name, route, persister, wnum, port,
            logfile=sys.stdout):
        """
        :param queue_name: queue this worker consumes from
        :param route: routing key assigned to this worker
        :param persister: backend used to register/track the worker
        :param wnum: worker number, used to build a unique worker id
        :param port: port the owning pool listens on
        :param logfile: destination stream for log output
        """
        self.queue_name = queue_name
        self.route = route
        self.persister = persister
        self.stop = False
        self.pid = os.getpid()
        self.worker_id = '-'.join(['worker', str(wnum), queue_name, route, str(self.pid)])
        self.log = Logger(self.worker_id,logfile=logfile, loglevel=logging.DEBUG)
        self.log.info("starting")

        self.host = socket.gethostbyname(socket.gethostname())
        self.port = port
        self.register()
        self.todo = None
        # (the original re-set self.stop = False here; removed as redundant)

    def register(self):
        """Announce this worker to the persistence backend."""
        self.persister.add_worker(self.worker_id, self.host, self.port)

    def unregister(self):
        """Remove this worker from the persistence backend."""
        self.persister.delete_worker(self.worker_id)

    def start_worker(self):
        """Main loop: yield the result of each queued callable until stopped.

        NOTE(review): this is a generator — merely calling it (e.g. as a
        thread target) executes nothing until it is iterated; confirm that
        callers actually iterate it.
        """
        while not self.stop:
            if self.todo:
                yield self.work(self.todo)
                self.todo = None

    def stop_worker(self):
        """Unregister and signal the main loop to exit."""
        self.log.info("shutting down")
        self.unregister()
        self.stop = True

    def work(self, f):
        """Run callable *f* while flagged as working in the persister."""
        self.persister.set_working()
        try:
            return f()
        finally:
            # clear the working flag even if f() raises (the original left
            # the flag set on exception)
            self.persister.unset_working()
Exemplo n.º 32
0
    def __init__(self, queue_name, route, persister, wnum, port,
            logfile=sys.stdout):
        """Build the worker's identity, logger and network info, then register
        it with the persister."""
        self.queue_name = queue_name
        self.route = route
        self.persister = persister
        self.stop = False
        self.pid = os.getpid()
        # unique id: worker-<num>-<queue>-<route>-<pid>
        id_parts = ['worker', str(wnum), queue_name, route, str(self.pid)]
        self.worker_id = '-'.join(id_parts)
        self.log = Logger(self.worker_id, logfile=logfile, loglevel=logging.DEBUG)
        self.log.info("starting")

        self.host = socket.gethostbyname(socket.gethostname())
        self.port = port
        self.register()
Exemplo n.º 33
0
    def __init__(self, queue_name, routing_keys=None,
            backend='mongodb', conn_url='localhost:27017',
            dbname='skunkqueue', logfile=sys.stdout,
            pidfile=None):
        """
        routing_keys are a required parameter to specify an n-length list
        of routing keys, which will each be assigned to one worker
        """
        self.stop = False
        self.queue_name = queue_name
        self.persister = get_backend(backend)(conn_url=conn_url, dbname=dbname)
        self.log = Logger('pool-'+queue_name, logfile=logfile)

        self.host = socket.gethostbyname(socket.gethostname())

        self.workers = {}

        if pidfile:
            import os  # local import: only needed to record the pid
            self.log.info("writing to pidfile %s" % pidfile)
            # the original opened the file in (default) read mode and wrote
            # str(self.pid) — an attribute never assigned — so this branch
            # always crashed; open for writing and record our own pid
            with open(pidfile, 'w') as f:
                f.write(str(os.getpid()))

        # TODO this needs to be in shared memory
        wnums = {}

        # control socket: bind to an ephemeral port on this host
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.s.bind((self.host, 0))
        self.s.listen(5)

        self.port = self.s.getsockname()[1]

        # one worker thread per routing key
        for key in routing_keys:
            if key not in wnums:
                wnums[key] = 0
            wnums[key] += 1
            worker = Worker(queue_name, key,
                    self.persister, wnums[key], self.port, logfile)

            thread = KillableThread(target=worker.begin_execution)
            thread.start()
            self.workers[worker.worker_id] = (worker, thread)
Exemplo n.º 34
0
class WorkerPool(object):
    """Owns a set of Worker threads (one per routing key) and a control
    socket over which management commands (terminate/kill/suicide) arrive."""

    def __init__(self, queue_name, routing_keys=None,
            backend='mongodb', conn_url='localhost:27017',
            dbname='skunkqueue', logfile=sys.stdout,
            pidfile=None):
        """
        routing_keys are a required parameter to specify an n-length list
        of routing keys, which will each be assigned to one worker
        """
        self.stop = False
        self.queue_name = queue_name
        self.persister = get_backend(backend)(conn_url=conn_url, dbname=dbname)
        self.log = Logger('pool-'+queue_name, logfile=logfile)

        self.host = socket.gethostbyname(socket.gethostname())

        self.workers = {}

        if pidfile:
            import os  # local import: only needed to record the pid
            self.log.info("writing to pidfile %s" % pidfile)
            # the original opened in (default) read mode and wrote
            # str(self.pid) — never assigned — so this branch always crashed;
            # open for writing and record our own pid
            with open(pidfile, 'w') as f:
                f.write(str(os.getpid()))

        # TODO this needs to be in shared memory
        wnums = {}

        # control socket on an ephemeral port
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.s.bind((self.host, 0))
        self.s.listen(5)

        self.port = self.s.getsockname()[1]

        for key in routing_keys:
            if key not in wnums:
                wnums[key] = 0
            wnums[key] += 1
            worker = Worker(queue_name, key,
                    self.persister, wnums[key], self.port, logfile)

            thread = KillableThread(target=worker.begin_execution)
            thread.start()
            self.workers[worker.worker_id] = (worker, thread)

    def __enter__(self, *args, **kwargs):
        """Install signal handlers so the pool shuts down cleanly."""
        self.log.info("starting")
        def gentle(signum, frame):
            self.log.info("Received gentle shutdown signal %d" % signum)
            self.shutdown()
            sys.exit(0)
        def rough(signum, frame):
            self.log.warn("Received non-gentle kill signal %d" % signum)
            self.die()
            sys.exit(0)

        signal.signal(signal.SIGINT,  rough )
        signal.signal(signal.SIGHUP,  gentle)
        signal.signal(signal.SIGTERM, gentle)
        signal.signal(signal.SIGALRM, gentle)
        signal.signal(signal.SIGQUIT, gentle)
        return self

    def __exit__(self, *args, **kwargs):
        self.shutdown()

    def shutdown(self):
        """Politely stop every worker and close the control socket."""
        # iterate over a snapshot: deleting from the dict while iterating
        # its live key view raises RuntimeError on Python 3
        for _id in list(self.workers.keys()):
            worker = self.workers[_id]
            worker[0].stop_worker()
            del self.workers[_id]
        self.s.close()
        self.stop = True

    def die(self):
        """Hard-kill every worker thread by raising TriggeredInterrupt in it."""
        # snapshot for the same dict-mutation-during-iteration reason
        for _id in list(self.workers.keys()):
            worker = self.workers[_id]
            try:
                worker[1].raise_exc(TriggeredInterrupt)
                self.log.warn("raised an exception in %s" % str(_id))
                del self.workers[_id]
            except ValueError: # it was dead already
                self.log.debug("ignored killing thread %s" % str(_id))
                continue
        self.s.close()
        self.stop = True

    def run_cmd(self, terminate=None, kill=None, suicide=None):
        """Execute one management command received over the control socket."""
        # requests a termination
        # terminate looks like: <worker_id>
        if terminate:
            self.workers[terminate][0].stop_worker()
            del self.workers[terminate]

        # performs a hard kill
        # kill looks like: <worker_id>
        if kill:
            try:
                self.workers[kill][1].raise_exc(TriggeredInterrupt)
                del self.workers[kill]
            except ValueError: # it was already dead
                self.log.warn("tried to kill a thread when it was dead already")

        # shuts down the entire pool
        # suicide looks like: 1
        if suicide == 1:
            self.stop = True

    def listen(self):
        """Accept control connections: each sends a 2-byte length prefix
        followed by that many bytes of JSON for run_cmd."""
        while not self.stop:
            conn, addr = self.s.accept()
            datalen = conn.recv(2)
            # sockets yield bytes: accumulate bytes, not str — the original
            # started from "" and crashed on the += under Python 3
            data = b""
            while len(data) < int(datalen):
                data += conn.recv(1024)
            data = json.loads(data)
            self.run_cmd(**data)

            # no need to run the pool if it has no workers
            if len(self.workers) == 0:
                self.shutdown()
Exemplo n.º 35
0
class Worker(object):
    """Queue worker: polls the persister for jobs on its route, executes
    them, and reports results (or errors) back."""

    def __init__(self, queue_name, route, persister, wnum, port,
            logfile=sys.stdout):
        """
        :param queue_name: queue this worker consumes from
        :param route: routing key assigned to this worker
        :param persister: backend used to register/track the worker
        :param wnum: worker number, used to build a unique worker id
        :param port: port the owning pool listens on
        :param logfile: destination stream for log output
        """
        self.queue_name = queue_name
        self.route = route
        self.persister = persister
        self.stop = False
        self.pid = os.getpid()
        self.worker_id = '-'.join(['worker', str(wnum), queue_name, route, str(self.pid)])
        self.log = Logger(self.worker_id,logfile=logfile, loglevel=logging.DEBUG)
        self.log.info("starting")

        self.host = socket.gethostbyname(socket.gethostname())
        self.port = port
        self.register()

    def begin_execution(self, *args):
        """Poll loop: fetch and run jobs until stopped or interrupted."""
        while not self.stop:
            try:
                job = self.persister.get_job_from_queue(self.queue_name, self.worker_id, self.route)
                if job:
                    self.do_job(job)
                sleep(0.1)
            except TriggeredInterrupt:
                # we were cut off by an interrupt trigger
                self.log.warn("received interrupt request; stopping current job")
                self.log.warn("no result will be committed and this job will not be restarted")
                self.stop_worker()

    def register(self):
        """Announce this worker to the persistence backend."""
        self.persister.add_worker(self.worker_id, self.host, self.port)

    def unregister(self):
        """Remove this worker from the persistence backend."""
        self.persister.delete_worker(self.worker_id)

    def do_job(self, job):
        """Deserialize *job*, import its module, call the target function and
        persist the result (or the error)."""
        # depickle
        # SECURITY: pickle/dill deserialization executes arbitrary code —
        # job bodies must come only from trusted producers
        body = pickle.loads(job['body'])
        directory = body['dir']
        # FIXME a horrible hack where we add ourselves to the pythonpath
        sys.path.append(directory)
        mod = __import__(body['mod'])
        self.log.debug("successfully imported module "+str(mod))

        if job['fn_type'] == 'method':
            parent = dill.loads(body['parent'])
            fn = getattr(parent, body['fn'])
        else:
            fn = getattr(mod, body['fn'])
        args = body['args']
        kwargs = body['kwargs']

        # call it
        self.persister.set_working(self.worker_id)
        try:
            ret = fn(*args, **kwargs)
            self.persister.save_result(job['job_id'], ret, 'complete')
            self.log.info(ret)
        except Exception as e:
            self.persister.save_result(job['job_id'], None, 'error')
            self.log.error(str(e))
            exc_t, exc_v, exc_tr = sys.exc_info()
            self.log.error(str(
                '\n'.join(traceback.format_exception(exc_t, exc_v, exc_tr))))
            self.log.debug("python path is %s" % str(sys.path))
        finally:
            # undo the pythonpath hack (the original bound the popped value
            # to an unused local)
            sys.path.pop()
        self.persister.unset_working(self.worker_id)

    def stop_worker(self):
        """Unregister and flag the poll loop to exit."""
        self.log.info("shutting down")
        self.unregister()
        self.stop = True
Exemplo n.º 36
0
    def __init__(self, queue_name, routing_keys=None,
            backend='mongodb', conn_url='localhost:27017',
            dbname='fwbots', logfile=sys.stdout,
            pidfile=None):
        """
        routing_keys are a required parameter to specify an n-length list
        of routing keys, which will each be assigned to one worker

        FWBOTS: we are using routing_keys as the account names to load
        """
        self.stop = False
        self.name = queue_name
        self.log = Logger('pool-'+queue_name, logfile=logfile)
        self.persister = get_backend(backend)(conn_url,dbname)

        self.port = -1 # too lazy to actually remove this

        self.workers = {}
        self.manual = {}
        self.auto = {}
        self.insta = {}

        if pidfile:
            import os  # local import: only needed to record the pid
            self.log.info("writing to pidfile %s" % pidfile)
            # the original opened read-only and wrote str(self.pid), which is
            # never assigned — both crashed; open for writing and record our
            # own pid
            with open(pidfile, 'w') as f:
                f.write(str(os.getpid()))

        # TODO this needs to be in shared memory
        wnums = {}

        self.ircbot = IrcListener(self,"#fwbots",self.name,"irc.freenode.net")

        for key in routing_keys:
            errnum = 0
            try:
                acc = get_twitter_account(key)
                if acc.ty == 'auto':
                    self.log.info('found auto for %s' % key)
                    self.auto[acc.name] = acc
                # if there are multiple manual accs defined, pick only the last one
                elif acc.ty == 'manual':
                    self.log.info('found manual for %s' % key)
                    self.manual[acc.name] = acc
            except NameError:
                errnum += 1
            try:
                acc = get_instagram_account(key)
                self.insta[acc.name] = acc
            except NameError:
                errnum += 1
            if errnum > 1:
                self.log.warn("Could not find any account called %s"%key)

            if key not in wnums:
                wnums[key] = 0
            wnums[key] += 1
            worker = Worker(queue_name, key,
                    self.persister, wnums[key], self.port, logfile)

            # NOTE(review): elsewhere in this code base Worker.start_worker is
            # a generator; a generator used as a thread target runs nothing
            # until iterated — confirm the thread actually drives it
            thread = KillableThread(target=worker.start_worker)
            thread.start()
            self.workers[worker.worker_id] = (worker, thread)
        # lastly, register yourself
        self.persister.add_pool(self)
Exemplo n.º 37
0
 def __init__(self, logpath):
     """Create the pipeline logger and start the OUT/ERROR capture pipes."""
     # logger that both pipes feed into
     self.logger = Logger(logpath)
     # pipe capturing stdout-style output
     self.logpipeout = LogPipe.createAndStart('OUT', self.logger)
     # pipe capturing stderr-style output
     self.logpipeerr = LogPipe.createAndStart('ERROR', self.logger)
Exemplo n.º 38
0
class WorkerPool(object):
    """
    Pool of Worker threads, one per routing key, backed by a persister.

    routing_keys are a required parameter to specify an n-length list
    of routing keys, which will each be assigned to one worker

    FWBOTS: we are using routing_keys as the account names to load
    """

    def __init__(self, queue_name, routing_keys=None,
            backend='mongodb', conn_url='localhost:27017',
            dbname='fwbots', logfile=sys.stdout,
            pidfile=None):
        """
        Load the accounts named by routing_keys, start one Worker thread
        per key, and register this pool with the persistence backend.

        :param queue_name: name of the work queue; also names this pool
        :param routing_keys: list of account names, one worker per entry
        :param backend: persistence backend key passed to get_backend
        :param conn_url: backend connection string (host:port)
        :param dbname: backend database name
        :param logfile: stream handed to the pool/worker loggers
        :param pidfile: optional path; when given, the pool's pid is written there
        """
        import os  # local import: keeps this block self-contained

        self.stop = False
        self.name = queue_name
        self.log = Logger('pool-' + queue_name, logfile=logfile)
        self.persister = get_backend(backend)(conn_url, dbname)

        self.port = -1  # too lazy to actually remove this

        self.workers = {}  # worker_id -> (Worker, KillableThread)
        self.manual = {}   # account name -> manually-driven twitter account
        self.auto = {}     # account name -> automated twitter account
        self.insta = {}    # account name -> instagram account

        if pidfile:
            self.log.info("writing to pidfile %s" % pidfile)
            # BUGFIX: original opened the pidfile read-only and wrote
            # self.pid, an attribute never assigned anywhere in this class.
            # Open for writing and record the real process id; the with
            # statement already closes the file.
            with open(pidfile, 'w') as f:
                f.write(str(os.getpid()))

        # TODO this needs to be in shared memory
        wnums = {}

        self.ircbot = IrcListener(self, "#fwbots", self.name, "irc.freenode.net")

        # BUGFIX: routing_keys defaults to None; iterating None raised
        # TypeError. The docstring calls it required, so treat None as empty.
        for key in (routing_keys or []):
            # errnum counts the account registries that did NOT know this key;
            # only warn when neither twitter nor instagram had it.
            errnum = 0
            try:
                acc = get_twitter_account(key)
                if acc.ty == 'auto':
                    self.log.info('found auto for %s' % key)
                    self.auto[acc.name] = acc
                # if there are multiple manual accs defined, pick only the last one
                elif acc.ty == 'manual':
                    self.log.info('found manual for %s' % key)
                    self.manual[acc.name] = acc
            except NameError:
                errnum += 1
            try:
                acc = get_instagram_account(key)
                self.insta[acc.name] = acc
            except NameError:
                errnum += 1
            if errnum > 1:
                self.log.warn("Could not find any account called %s" % key)

            # per-key worker sequence number
            wnums[key] = wnums.get(key, 0) + 1
            worker = Worker(queue_name, key,
                    self.persister, wnums[key], self.port, logfile)

            thread = KillableThread(target=worker.start_worker)
            thread.start()
            self.workers[worker.worker_id] = (worker, thread)
        # lastly, register yourself
        self.persister.add_pool(self)

    def __enter__(self, *args, **kwargs):
        """Install signal handlers so the pool shuts down cleanly on signals."""
        self.log.info("starting")
        def gentle(signum, frame):
            self.log.info("Received gentle shutdown signal %d" % signum)
            self.shutdown()
            sys.exit(0)
        def rough(signum, frame):
            self.log.warn("Received non-gentle kill signal %d" % signum)
            self.die()
            sys.exit(0)

        # SIGINT is treated as a hard kill; everything else is gentle.
        signal.signal(signal.SIGINT,  rough )
        signal.signal(signal.SIGHUP,  gentle)
        signal.signal(signal.SIGTERM, gentle)
        signal.signal(signal.SIGALRM, gentle)
        signal.signal(signal.SIGQUIT, gentle)
        return self

    def __exit__(self, *args, **kwargs):
        """Leaving the context always performs a gentle shutdown."""
        self.shutdown()

    def shutdown(self):
        """Gently stop every worker, then deregister this pool."""
        # BUGFIX: original deleted entries while iterating the live keys()
        # view, which raises RuntimeError in Python 3; snapshot the ids first.
        for _id in list(self.workers.keys()):
            worker, _thread = self.workers.pop(_id)
            worker.stop_worker()
        self.stop = True
        self.persister.delete_pool(self.name)

    def die(self):
        """Hard-kill every worker by raising TriggeredInterrupt in its thread."""
        # BUGFIX: snapshot ids before mutating the dict (see shutdown()).
        for _id in list(self.workers.keys()):
            worker = self.workers[_id]
            try:
                worker[1].raise_exc(TriggeredInterrupt)
                self.log.warn("raised an exception in %s" % str(_id))
                del self.workers[_id]
            except ValueError:  # it was dead already
                self.log.debug("ignored killing thread %s" % str(_id))
                continue
        self.stop = True

    def work(self, f):
        """Assign callable f as the next job of the first available worker."""
        w = self.persister.get_avail_workers()[0]
        w.todo = f

    def run_cmd(self, terminate=None, kill=None, suicide=None,
            tweet=None, rtp=None):
        """Dispatch a control command; each keyword triggers one action."""
        # requests a termination
        # terminate looks like: <worker_id>
        if terminate:
            self.workers[terminate][0].stop_worker()
            del self.workers[terminate]

        # performs a hard kill
        # kill looks like: <worker_id>
        if kill:
            try:
                self.workers[kill][1].raise_exc(TriggeredInterrupt)
                del self.workers[kill]
            except ValueError:  # it was already dead
                self.log.warn("tried to kill a thread when it was dead already")

        # shuts down the entire pool
        # suicide looks like: 1
        if suicide == 1:
            self.stop = True

        # sends a tweet from self.manual
        # tweet looks like: <msg>
        if tweet:
            if not self.manual:
                # robustness: original crashed with AttributeError here
                self.log.warn("no manual account loaded; cannot tweet")
            else:
                # BUGFIX: self.manual is a dict of accounts, not a single
                # account; per the loading comment, use the last one added.
                manual = list(self.manual.values())[-1]
                self.log.info("tweeting with manual account %s: '%s'" % (manual.name, tweet))
                self.work(lambda: manual.make_tweet(tweet))
                # if rtp is set, have dummies retweet it with probability rtp
                if rtp:
                    def f():
                        # BUGFIX: iterate the account objects, not the key
                        # strings, and retweet with probability rtp (the
                        # original comparison was inverted: 1 - rtp).
                        for a in self.auto.values():
                            if random.random() < rtp:
                                a.retweet_last(manual.name)
                    self.work(f)

    def listen(self):
        """Block in the IRC bot's event loop to receive commands."""
        self.ircbot.start()