Example #1
def initLog(self):
    self._dlog = AcrcloudLogger("RecManager", logging.INFO)
    if not self._dlog.addFilehandler(logfile="RecWorker.log",
                                     logdir=self._config['log']['dir']):
        self.exitRecM('rec_error#0#init_flog_error')
    if not self._dlog.addStreamHandler():
        self.exitRecM('rec_error#0#init_slog_error')
Example #2
def init_log(self):
    self.dlog = AcrcloudLogger('ACRCloud_ScanF', logging.INFO)
    if not self.dlog.addFilehandler(logfile="log_scan_files.log",
                                    logdir="./",
                                    loglevel=logging.WARN):
        sys.exit(1)
    if not self.dlog.addStreamHandler():
        sys.exit(1)
Example #3
def initLog(self):
    self.colorfmt = "$MAGENTA%(asctime)s - $RED%(name)-20s$RESET - $COLOR%(levelname)-8s$RESET - $COLOR%(message)s$RESET"
    self.dlog = AcrcloudLogger('Monitor@Main', logging.INFO)
    if not self.dlog.addFilehandler(logfile="Monitor.log",
                                    logdir=self.config["log"]["dir"]):
        sys.exit(1)
    if not self.dlog.addStreamHandler(self.colorfmt):
        sys.exit(1)
Example #4
def initLog(self):
    self._dlog = AcrcloudLogger("SWorker_{0}.log".format(self._stream_id),
                                logging.INFO)
    if not self._dlog.addFilehandler(logfile="SWorker_{0}.log".format(
            self._stream_id),
                                     logdir=self._config['log']['dir']):
        sys.exit(1)
    if not self._dlog.addStreamHandler():
        sys.exit(1)
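
Examples #1-#4 all drive the same AcrcloudLogger contract: the constructor takes a logger name and a level, addFilehandler() and addStreamHandler() return False on failure, and the wrapped logging.Logger is exposed as .logger. For readers without the ACRCloud tooling at hand, here is a minimal stand-in satisfying that contract, built on the standard logging module; the body is inferred from the calls above, not the original implementation.

import logging
import os
import sys


class AcrcloudLogger:
    # Hypothetical stand-in; interface inferred from the snippets above.

    def __init__(self, name, level=logging.INFO):
        self.logger = logging.getLogger(name)
        self.logger.setLevel(level)

    def addFilehandler(self, logfile, logdir, loglevel=logging.INFO):
        # Return False instead of raising, matching the callers' checks.
        try:
            handler = logging.FileHandler(os.path.join(logdir, logfile))
            handler.setLevel(loglevel)
            self.logger.addHandler(handler)
            return True
        except Exception:
            return False

    def addStreamHandler(self, colorfmt=None):
        try:
            handler = logging.StreamHandler()
            if colorfmt:
                handler.setFormatter(logging.Formatter(colorfmt))
            self.logger.addHandler(handler)
            return True
        except Exception:
            return False


if __name__ == '__main__':
    dlog = AcrcloudLogger('Demo', logging.INFO)
    if not dlog.addStreamHandler():
        sys.exit(1)
    dlog.logger.info('logger ready')
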
Example #5
def initLog(self):
    self._dlog = AcrcloudLogger("RecPool_{0}".format(self._rec_pool_id),
                                logging.INFO)
    if not self._dlog.addFilehandler(logfile="RecPool_{0}.log".format(
            self._rec_pool_id),
                                     logdir=self._config['log']['dir']):
        self.exitRecM(
            'rec_error#0#init_flog_error, rec_pool_id:{0}'.format(
                self._rec_pool_id))
    if not self._dlog.addStreamHandler():
        self.exitRecM(
            'rec_error#0#init_slog_error, rec_pool_id:{0}'.format(
                self._rec_pool_id))
Example #6
def initLog(self):
    self.colorfmt = "$MAGENTA%(asctime)s - $RED%(name)-20s$RESET - $COLOR%(levelname)-8s$RESET - $COLOR%(message)s$RESET"
    self.dlog = AcrcloudLogger('Monitor@Main', logging.INFO)
    if not self.dlog.addFilehandler(logfile="Monitor.log",
                                    logdir=self.config["log"]["dir"]):
        sys.exit(1)
    if not self.dlog.addStreamHandler(self.colorfmt):
        sys.exit(1)
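
The colorfmt string used in Examples #3 and #6 mixes logging's %-style fields with $MAGENTA/$RED/$COLOR/$RESET tokens. A plain logging.Formatter would print those tokens literally, so addStreamHandler() presumably swaps them for ANSI escape codes, with $COLOR varying by record level. Below is a sketch of one way such a format string could be consumed; the token-to-color mapping is an assumption, not the tooling's actual table.

import logging

ANSI = {'$MAGENTA': '\033[35m', '$RED': '\033[31m', '$RESET': '\033[0m'}
LEVEL_COLOR = {'INFO': '\033[32m', 'WARNING': '\033[33m',
               'ERROR': '\033[31m'}  # assumed per-level colors


class ColorFormatter(logging.Formatter):
    def __init__(self, colorfmt):
        logging.Formatter.__init__(self)
        self._colorfmt = colorfmt

    def format(self, record):
        # Resolve $COLOR for this record's level, then the fixed tokens.
        fmt = self._colorfmt.replace(
            '$COLOR', LEVEL_COLOR.get(record.levelname, ''))
        for token, code in ANSI.items():
            fmt = fmt.replace(token, code)
        return logging.Formatter(fmt).format(record)


handler = logging.StreamHandler()
handler.setFormatter(ColorFormatter(
    "$MAGENTA%(asctime)s - $COLOR%(levelname)-8s$RESET - %(message)s"))
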
Example #7
class AcrcloudSpringboard:

    def __init__(self, manager, config, dworker, rworker, sworker):
        self.manager = manager
        self.config = config
        self.dworker = dworker
        self.rworker = rworker
        self.sworker = sworker
        self.access_key = self.config['user']['access_key']
        #self.access_secret = self.config['user']['access_secret']
        self.api_url = self.config['user']['api_url']
        self.stream_ids = self.config.get("stream_ids", [])
        self.record = int(self.config['record']['record'])
        self.record_before = int(self.config['record']['record_before'])
        self.record_after = int(self.config['record']['record_after'])
        self.addkeys = ['access_key', 'access_secret', 'rec_host', 'stream_id',
                        'stream_url', 'interval', 'monitor_length',
                        'monitor_timeout', 'rec_timeout']
        self.mainQueue = multiprocessing.Queue()
        self.shareStatusDict = multiprocessing.Manager().dict()
        self.shareMonitorDict = multiprocessing.Manager().dict()
        self.shareDict = multiprocessing.Manager().dict()
        self.initLog()
        self.initManager()
        self.initStreams()

    def initLog(self):
        self.colorfmt = "$MAGENTA%(asctime)s - $RED%(name)-20s$RESET - $COLOR%(levelname)-8s$RESET - $COLOR%(message)s$RESET"
        self.dlog = AcrcloudLogger('Acrcloud@Springboard', logging.INFO)
        if not self.dlog.addStreamHandler(self.colorfmt):
            sys.exit(1)

    def initManager(self):
        try:
            self.manager_proc = multiprocessing.Process(target = self.manager,
                                                        args = (self.mainQueue,
                                                                self.config,
                                                                self.shareMonitorDict,
                                                                self.shareStatusDict,
                                                                self.shareDict,
                                                                self.dworker,
                                                                self.rworker,
                                                                self.sworker))
            self.manager_proc.start()
            if not self.manager_proc.is_alive():
                self.dlog.logger.error('Error@Springboard:create manager process failed, it will stop')
                sys.exit(1)
            else:
                self.dlog.logger.warn('Warn@Springboard:manager init success')
        except Exception as e:
            self.dlog.logger.error('Error@Springboard:init manager failed, it will stop', exc_info=True)
            sys.exit(1)

    def checkInfo(self, info):
        if len(info) >= 8:
            for key in self.addkeys:
                if info.get(key, 'None') == 'None':
                    return False
            return True
        return False

    def changeStat(self, id, index, msg):
        stat = self.shareStatusDict[id]
        stat[index] = msg
        self.shareStatusDict[id] = stat

    def changeMon(self, id, index, value):
        tmp = self.shareMonitorDict[id]
        tmp[index] = value
        self.shareMonitorDict[id] = tmp

    def getPage(self, url, referer=None):
        response = ''
        for i in range(2):
            request = urllib2.Request(url)
            request.add_header("User-Agent", "Mozilla/5.0 (Windows NT 6.1; rv:14.0) Gecko/20100101 Firefox/14.0.1")
            if referer:
                request.add_header("Referer", referer)
            try:
                response = urllib2.urlopen(request)
                if response:
                    result = response.read()
                    response.close()
                    return result
            except Exception as e:
                traceback.print_exc()
                if response:
                    response.close()
        return ''
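
Everything AcrcloudSpringboard does after startup flows through mainQueue: each instruction is a (cmd, info) tuple that the manager process pops and dispatches (see AcrcloudMonitor.start()/doIt() in the next example). A self-contained sketch of that handoff pattern, with a hypothetical command name:

import multiprocessing


def manager_loop(queue):
    # Block on the queue and dispatch (cmd, info) tuples until 'stop'.
    while True:
        cmd, info = queue.get()
        if cmd == 'stop':
            break
        print('handling', cmd, info)


if __name__ == '__main__':
    q = multiprocessing.Queue()
    proc = multiprocessing.Process(target=manager_loop, args=(q,))
    proc.start()
    q.put(('refresh', {'stream_id': 's1'}))
    q.put(('stop', ''))
    proc.join()
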
Example #8
class AcrcloudMonitor:
    def __init__(self, mainQueue, config, shareMonitorDict, shareStatusDict,
                 shareDict, dworker, rworker, sworker):
        self.recQueue = multiprocessing.Queue()
        self.recMainQueue = multiprocessing.Queue()
        self.resultQueue = multiprocessing.Queue()
        self.resMainQueue = multiprocessing.Queue()
        self.springQueue = mainQueue
        self.config = config
        self.shareMonitorDict = shareMonitorDict
        self.shareStatusDict = shareStatusDict
        self.shareDict = shareDict
        self.procDict = dict()
        self.dworker = dworker
        self.rworker = rworker
        self.sworker = sworker
        self.initLog()
        self.initRec()
        self.initRes()
        self.initFresh()

    def initLog(self):
        self.colorfmt = "$MAGENTA%(asctime)s - $RED%(name)-20s$RESET - $COLOR%(levelname)-8s$RESET - $COLOR%(message)s$RESET"
        self.dlog = AcrcloudLogger('Monitor@Main', logging.INFO)
        if not self.dlog.addFilehandler(logfile="Monitor.log",
                                        logdir=self.config["log"]["dir"]):
            sys.exit(1)
        if not self.dlog.addStreamHandler(self.colorfmt):
            sys.exit(1)

    def initRec(self):
        self.recproc = multiprocessing.Process(
            target=self.rworker,
            args=(self.recMainQueue, self.recQueue, self.resultQueue,
                  self.shareDict, self.config))
        self.recproc.start()
        if not self.recproc.is_alive():
            self.dlog.logger.error(
                'Error@Acrcloud_Manager.init_recognize.failed')
            sys.exit(1)
        else:
            self.dlog.logger.warn(
                'Warn@Acrcloud_Manager.init_recognize.success')

    def initRes(self):
        self.resproc = multiprocessing.Process(target=self.sworker,
                                               args=(self.resMainQueue,
                                                     self.resultQueue,
                                                     self.config))
        self.resproc.start()
        if not self.resproc.is_alive():
            self.dlog.logger.error('[email protected]_result.failed')
            sys.exit(1)
        else:
            self.dlog.logger.warn('[email protected]_result.success')

    def initFresh(self):
        self.fresh_proc = Worker_AutoF(self.config, self.dlog)
        self.fresh_proc.start()
        if not self.fresh_proc.is_alive():
            self.dlog.logger.error('[email protected]_fresh.failed')
            sys.exit(1)
        else:
            self.dlog.logger.warn('[email protected]_fresh.success')

    def checkInfo(self, info):
        if len(info) >= 8:
            for key in self.addkeys:
                if info.get(key, 'None') == 'None':
                    return False
            return True
        return False

    def changeStat(self, id, index, msg):
        stat = self.shareStatusDict[id]
        stat[index] = msg
        self.shareStatusDict[id] = stat

    def startMonitor(self):
        try:
            for stream_id in self.shareMonitorDict.keys():
                jsoninfo = self.shareMonitorDict[stream_id][0]
                self.addMonitor(jsoninfo)
                time.sleep(0.5)
        except Exception as e:
            self.dlog.logger.error('Error@Acrcloud_Manager.startMonitor Error:',
                                   exc_info=True)

    def addMonitor(self, jsoninfo):
        try:
            stream_id = jsoninfo.get('stream_id')
            if stream_id in self.shareMonitorDict and stream_id not in self.procDict:
                mainqueue = multiprocessing.Queue()
                proc = multiprocessing.Process(
                    target=self.dworker,
                    args=(jsoninfo, mainqueue, self.recQueue,
                          self.shareStatusDict, self.shareDict, self.config))
                proc.daemon = True
                proc.start()
                if proc.is_alive():
                    self.procDict[stream_id] = [proc, mainqueue]
                    self.dlog.logger.warn('ADD Monitor ({0}, {1})'.format(
                        jsoninfo['stream_id'], jsoninfo['stream_url']))
                    return True
        except Exception as e:
            self.dlog.logger.error('[email protected] Monitor Error:',
                                   exc_info=True)
        self.dlog.logger.error(
            'Error@Acrcloud_Manager.Add Monitor Failed ({0}, {1})'.format(
                jsoninfo['stream_id'], jsoninfo['stream_url']))
        return False

    def delMonitor(self, jsoninfo):
        try:
            stream_id = jsoninfo.get('stream_id')
            proc, mainqueue = self.procDict[stream_id]
            mainqueue.put('STOP')
            proc.join()
            if not proc.is_alive():
                #del self.shareStatusDict[stream_id]
                self.shareStatusDict[stream_id] = ['10#delete', '2#unknow']
                del self.procDict[stream_id]
                self.dlog.logger.warn('DEL Monitor ({0}, {1})'.format(
                    jsoninfo['stream_id'], jsoninfo['stream_url']))
                return True
        except Exception as e:
            self.dlog.logger.error('[email protected] Monitor Error:',
                                   exc_info=True)
        self.dlog.logger.error(
            'Error@Acrcloud_Manager.Del Monitor Failed ({0}, {1})'.format(
                jsoninfo['stream_id'], jsoninfo['stream_url']))
        return False

    def delAllM(self):
        try:
            for stream_id in self.procDict.keys():
                if self.delMonitor({'stream_id': stream_id, 'stream_url': ''}):
                    del self.shareMonitorDict[stream_id]
        except Exception as e:
            self.dlog.logger.error('Del All Monitors Error', exc_info=True)

    def reStart(self, jsoninfo):
        try:
            stream_id = jsoninfo.get('stream_id')
            if stream_id in self.shareMonitorDict:
                code, msg = self.shareStatusDict[stream_id][0].split('#')
                proc, mainqueue = self.procDict[stream_id]
                info, createTime = self.shareMonitorDict[stream_id][:2]
                if code == '4' or code == '3' or code == '6':
                    if proc.is_alive():
                        mainqueue.put('RESTART')
                        self.changeStat(stream_id, 0, '0#restart0')
                        self.dlog.logger.warn(
                            'Restart Monitor ({0}, {1}).'.format(
                                jsoninfo['stream_id'], jsoninfo['stream_url']))
                        return True
                    else:
                        proc = multiprocessing.Process(
                            target=self.dworker,
                            args=(info, mainqueue, self.recQueue,
                                  self.shareStatusDict, self.shareDict,
                                  self.config))
                        proc.daemon = True
                        proc.start()
                        if proc.is_alive():
                            self.procDict[stream_id][0] = proc
                            self.changeStat(stream_id, 0, '0#restart1')
                            self.dlog.logger.warn(
                                'Restart Monitor ({0}, {1}).'.format(
                                    jsoninfo['stream_id'],
                                    jsoninfo['stream_url']))
                            return True
        except Exception as e:
            self.dlog.logger.error(
                'Error@Acrcloud_Manager.ReStart Monitor Error:', exc_info=True)
        self.dlog.logger.error(
            'Error@Acrcloud_Manager.ReStart Monitor Failed ({0}, {1}).'.format(
                jsoninfo['stream_id'], jsoninfo['stream_url']))
        return False

    def refresh(self):
        try:
            for stream_id in self.shareMonitorDict.keys():
                jsoninfo, createTime, value = self.shareMonitorDict[stream_id]
                if value == 1:
                    self.dlog.logger.warn(
                        'Warn@Acrcloud_Manager.refresh: {0} - Update'.
                        format(jsoninfo.get('stream_id', '')))
                    self.delMonitor(jsoninfo)
                    self.addMonitor(jsoninfo)
                elif value == 2:
                    self.dlog.logger.warn(
                        'Warn@Acrcloud_Manager.refresh: {0} - New Add'.
                        format(jsoninfo.get('stream_id', '')))
                    self.addMonitor(jsoninfo)
                elif value == 3:
                    self.dlog.logger.warn(
                        'Warn@Acrcloud_Manager.refresh: {0} - Delete'.
                        format(jsoninfo.get('stream_id', '')))
                    self.delMonitor(jsoninfo)
                    del self.shareMonitorDict[stream_id]
                time.sleep(1)
        except Exception as e:
            self.dlog.logger.error(
                'Error@Acrcloud_Manager.Refresh Monitor Error:', exc_info=True)

    def pauseM(self, jsoninfo):
        try:
            stream_id = jsoninfo.get('stream_id')
            if stream_id in self.shareMonitorDict:
                code, msg = self.shareStatusDict[stream_id][0].split('#')
                if code == '0' or code == '1':
                    proc, mainqueue = self.procDict[stream_id]
                    mainqueue.put('PAUSE')
                    self.dlog.logger.warn('PAUSE Monitor ({0}, {1}).'.format(
                        jsoninfo['stream_id'], jsoninfo['stream_url']))
                    return True
        except Exception as e:
            self.dlog.logger.error(
                'Error@Acrcloud_Manager.Pause Monitor Error:', exc_info=True)
        self.dlog.logger.error(
            'Error@Acrcloud_Manager.Pause Monitor Failed ({0}, {1}).'.format(
                jsoninfo['stream_id'], jsoninfo['stream_url']))
        return False

    def doIt(self, cmd, info):
        try:
            if cmd == 'heiheihei':
                self.startMonitor()
            elif cmd == 'refresh':
                self.refresh()
            elif cmd == 'restart':
                self.reStart(info)
            elif cmd == 'pause':
                self.pauseM(info)
            elif cmd == 'stop':
                self.stop()
        except Exception as e:
            self.dlog.logger.error("doIt Error:", exc_info=True)

    def start(self):
        self._running = True
        while 1:
            if not self._running:
                break
            try:
                cmd, info = self.springQueue.get()
            except Queue.Empty:
                continue
            self.doIt(cmd, info)
            time.sleep(0.1)

    def stop(self):
        self.delAllM()
        self.dlog.logger.warn('Warn@Acrcloud_Manager.DelAllMonitors_Success')
        self.recMainQueue.put(('stop', ''))
        self.resMainQueue.put(('stop', ''))
        self._running = False
        self.dlog.logger.warn('Warn@Acrcloud_Manager_Stop')
        sys.exit(1)
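
One detail worth calling out in the example above: changeStat() reads the whole list out of the shared dict, mutates the local copy, and assigns it back. The round trip is required because a multiprocessing.Manager().dict() proxy only propagates assignment to a key; mutating a value fetched from it changes a local copy. A minimal demonstration:

import multiprocessing

if __name__ == '__main__':
    share = multiprocessing.Manager().dict()
    share['s1'] = ['0#ok', '2#unknow']

    # In-place mutation is lost: indexing the proxy returns a copy.
    share['s1'][0] = '10#delete'
    print(share['s1'][0])  # still '0#ok'

    # Copy, mutate, reassign -- the pattern changeStat() uses.
    stat = share['s1']
    stat[0] = '10#delete'
    share['s1'] = stat
    print(share['s1'][0])  # now '10#delete'
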
Example #9
class ACRCloud_Scan_Files:
    def __init__(self, config_file):
        self.config = {
            'host': '',
            'access_key': '',
            'access_secret': '',
            'debug': False,
            'timeout': 10  # seconds
        }
        self.openpyxl_version = ".".join(
            str(openpyxl.__version__).split(".")[:2])
        self.config_file = config_file
        self.init_log()
        self.init_config()

    def init_log(self):
        self.dlog = AcrcloudLogger('ACRCloud_ScanF', logging.INFO)
        if not self.dlog.addFilehandler(logfile="log_scan_files.log",
                                        logdir="./",
                                        loglevel=logging.WARN):
            sys.exit(1)
        if not self.dlog.addStreamHandler():
            sys.exit(1)

    def init_config(self):
        try:
            json_config = None
            with codecs.open(self.config_file, 'r') as f:
                json_config = json.loads(f.read())
            for k in ["host", "access_key", "access_secret"]:
                if k in json_config and json_config[k].strip():
                    self.config[k] = str(json_config[k].strip())
                else:
                    self.dlog.logger.error(
                        "init_config.not found {0} in config.json, please check"
                        .format(k))
                    sys.exit(1)

            self.re_handler = ACRCloudRecognizer(self.config)
            if self.re_handler:
                self.dlog.logger.warning("init_config success!")
        except Exception as e:
            self.dlog.logger.error("init_config.error", exc_info=True)

    def read_file(self, infile, jFirst=True):
        with open(infile, "rb") as rfile:
            for line in rfile:
                if jFirst:
                    jFirst = False
                    continue
                yield line.strip()

    def write_error(self, file_path, error_time, error_detail):
        with open('error_scan.txt', 'a') as f:
            msg = file_path + '||' + str(error_time) + '||' + str(
                error_detail) + '\n'
            f.write(msg)

    def empty_error_scan(self):
        if os.path.exists('error_scan.txt'):
            os.remove('error_scan.txt')

    def export_to_csv(self,
                      result_list,
                      export_filename="ACRCloud_ScanFile_Results.csv",
                      export_dir="./"):
        try:
            results = []
            for item in result_list:
                filename = item["file"]
                timestamp = item["timestamp"]
                jsoninfo = item["result"]
                if "status" in jsoninfo and jsoninfo["status"]["code"] == 0:
                    row = self.parse_data(jsoninfo)
                    row = [filename, timestamp] + list(row)
                    results.append(row)

            export_filepath = os.path.join(export_dir, export_filename)

            with codecs.open(export_filepath, 'w', 'utf-8-sig') as f:
                head_row = [
                    'filename', 'timestamp', 'title', 'artists', 'album',
                    'acrid', 'played_duration', 'label', 'isrc', 'upc',
                    'deezer', 'spotify', 'itunes', 'youtube',
                    'custom_files_title', 'audio_id'
                ]
                dw = csv.writer(f)
                dw.writerow(head_row)
                dw.writerows(results)
                self.dlog.logger.info(
                    "export_to_csv.Save Data to csv: {0}".format(
                        export_filepath))
        except Exception as e:
            self.dlog.logger.error("Error export_to_csv", exc_info=True)

    def export_to_json(self,
                       result_list,
                       export_filename="ACRCloud_ScanFile_Results.json",
                       export_dir="./"):
        try:
            results = []
            json_results = []
            export_filepath = os.path.join(export_dir, export_filename)

            head_row = [
                'filename', 'timestamp', 'title', 'artists', 'album', 'acrid',
                'played_duration', 'label', 'isrc', 'upc', 'deezer', 'spotify',
                'itunes', 'youtube', 'custom_files_title', 'audio_id'
            ]

            for item in result_list:
                filename = item["file"]
                timestamp = item["timestamp"]
                jsoninfo = item["result"]
                if "status" in jsoninfo and jsoninfo["status"]["code"] == 0:
                    row = self.parse_data(jsoninfo)
                    row = [filename, timestamp] + list(row)
                    results.append(row)

            for i in results:
                # Build a fresh dict per row; reusing one dict would make
                # every entry in json_results point at the same object.
                new_results = {}
                for k in range(len(head_row)):
                    new_results[head_row[k]] = i[k]
                json_results.append(new_results)

            with codecs.open(export_filepath, 'w', 'utf-8-sig') as f:
                f.write(json.dumps(json_results))
        except Exception as e:
            self.dlog.logger.error("Error export_to_json", exc_info=True)

    def export_to_xlsx(self,
                       result_list,
                       export_filename="ACRCloud_ScanFile_Results.xlsx",
                       export_dir="./"):
        try:
            wb = Workbook()
            sheet_channels = wb.active
            sheet_channels.title = "Results"
            head_row = [
                'filename', 'timestamp', 'title', 'artists', 'album', 'acrid',
                'played_duration', 'label', 'isrc', 'upc', 'deezer', 'spotify',
                'itunes', 'youtube', 'custom_files_title', 'audio_id'
            ]
            sheet_channels.append(head_row)

            for item in result_list:
                filename = item["file"]
                timestamp = item["timestamp"]
                jsoninfo = item["result"]
                if "status" in jsoninfo and jsoninfo["status"]["code"] == 0:
                    row = self.parse_data(jsoninfo)
                    row = [filename, timestamp] + list(row)
                    sheet_channels.append(row)

            export_filepath = os.path.join(export_dir, export_filename)

            for column_cells in sheet_channels.columns:
                length = max(
                    len(str(cell.value) if cell.value else "")
                    for cell in column_cells)
                if length > 100:
                    length = 100
                if self.openpyxl_version >= "2.6":
                    sheet_channels.column_dimensions[
                        column_cells[0].column_letter].width = length
                else:
                    sheet_channels.column_dimensions[
                        column_cells[0].column].width = length
            wb.save(export_filepath)

            self.dlog.logger.info(
                "export_to_xlsx.Save Data to xlsx: {0}".format(
                    export_filepath))
        except Exception as e:
            self.dlog.logger.error("Error export_to_xlsx", exc_info=True)

    def parse_data(self, jsoninfo):
        try:
            title, played_duration, isrc, upc, acrid, label, album = [""] * 7
            artists, deezer, spotify, itunes, youtube, custom_files_title, audio_id = [
                ""
            ] * 7

            metadata = jsoninfo.get('metadata', {})
            played_duration = metadata.get("played_duration", "")
            if "music" in metadata and len(metadata["music"]) > 0:
                item = metadata["music"][0]
                title = item.get("title", "")
                offset = item.get("play_offset_ms", "")
                if "external_ids" in item:
                    if "isrc" in item["external_ids"]:
                        isrc_obj = item["external_ids"]["isrc"]
                        isrc = isrc_obj[0] if type(
                            isrc_obj) == list else isrc_obj
                    if "upc" in item["external_ids"]:
                        upc_obj = item["external_ids"]["upc"]
                        upc = upc_obj[0] if type(upc_obj) == list else upc_obj
                acrid = item.get("acrid", "")
                label = item.get("label", "")
                album = item.get("album", {"name": ""}).get("name", "")
                artists = ",".join([
                    ar["name"] for ar in item.get('artists', [{
                        "name": ""
                    }]) if ar.get("name")
                ])
                if "external_metadata" in item:
                    e_metadata = item["external_metadata"]
                    if "deezer" in e_metadata:
                        deezer_obj = e_metadata["deezer"]
                        deezer = deezer_obj[0]["track"]["id"] if type(
                            deezer_obj) == list else deezer_obj["track"]["id"]
                    if "spotify" in e_metadata:
                        spotify_obj = e_metadata["spotify"]
                        spotify = spotify_obj[0]["track"]["id"] if type(
                            spotify_obj
                        ) == list else spotify_obj["track"]["id"]
                    if "youtube" in e_metadata:
                        youtube_obj = e_metadata["youtube"]
                        youtube = youtube_obj[0]["vid"] if type(
                            youtube_obj) == list else youtube_obj["vid"]

            if "custom_files" in metadata and len(
                    metadata["custom_files"]) > 0:
                custom_item = metadata["custom_files"][0]
                custom_files_title = custom_item.get("title", "")
                audio_id = custom_item.get("audio_id", "")
        except Exception as e:
            self.dlog.logger.error(
                "parse_data.error.data:{0}".format(metadata), exc_info=True)

        res = (title, artists, album, acrid, played_duration, label, isrc, upc,
               deezer, spotify, itunes, youtube, custom_files_title, audio_id)
        return res

    def apply_filter(self, results):
        fworker = FilterWorker()
        result_new = fworker.apply_filter(results)
        return result_new

    def do_recognize(self, filepath, start_time, rec_length):
        # Compute the timestamp outside the try block so the error path
        # below can still return it.
        current_time = time.strftime('%H:%M:%S', time.gmtime(start_time))
        try:
            res_data = self.re_handler.recognize_by_file(
                filepath, start_time, rec_length)
            return filepath, current_time, res_data
        except Exception as e:
            self.dlog.logger.error("do_recognize.error.({0}, {1}, {2})".format(
                filepath, start_time, rec_length),
                                   exc_info=True)
        return filepath, current_time, None

    def recognize_file(self,
                       filepath,
                       start_time,
                       stop_time,
                       step,
                       rec_length,
                       with_duration=0):
        self.dlog.logger.warning(
            "scan_file.start_to_run: {0}".format(filepath))

        result = []
        for i in range(start_time, stop_time, step):
            filep, current_time, res_data = self.do_recognize(
                filepath, i, rec_length)
            try:
                print(res_data)
                jsoninfo = json.loads(res_data)
                code = jsoninfo['status']['code']
                msg = jsoninfo['status']['msg']
                if "status" in jsoninfo and jsoninfo["status"]["code"] == 0:
                    result.append({
                        "timestamp": current_time,
                        "rec_length": rec_length,
                        "result": jsoninfo,
                        "file": filep
                    })
                    res = self.parse_data(jsoninfo)
                    # self.dlog.logger.info('recognize_file.(time:{0}, title: {1})'.format(current_time, res[0]))
                    self.dlog.logger.info(
                        'recognize_file.(time:{0}, title: {1}, custom title: {2})'
                        .format(current_time, res[0], res[-2]))
                if code == 2005:
                    self.dlog.logger.warning(
                        'recognize_file.(time:{0}, code:{1}, Done!)'.format(
                            current_time, code))
                    break
                elif code == 1001:
                    result.append({
                        "timestamp": current_time,
                        "rec_length": rec_length,
                        "result": jsoninfo,
                        "file": filep
                    })
                    self.dlog.logger.info(
                        "recognize_file.(time:{0}, code:{1}, No_Result)".
                        format(current_time, code))
                elif code == 3001:
                    self.dlog.logger.error(
                        'recognize_file.(time:{0}, code:{1}, Missing/Invalid Access Key)'
                        .format(current_time, code))
                    break
                elif code == 3003:
                    self.dlog.logger.error(
                        'recognize_file.(time:{0}, code:{1}, Limit exceeded)'.
                        format(current_time, code))
                elif code == 3000:
                    self.dlog.logger.error(
                        'recognize_file.(time:{0}, {1}, {2})'.format(
                            current_time, code, msg))
                    self.write_error(filepath, i, 'NETWORK ERROR')
            except Exception as e:
                self.dlog.logger.error('recognize_file.error', exc_info=True)
                self.write_error(filepath, i, 'JSON ERROR')
        return result

    def scan_file_main(self, option, start_time, stop_time):
        try:
            filepath = option.file_path
            step = option.step
            rec_length = option.rec_length
            with_duration = option.with_duration
            out_dir = option.out_dir
            if out_dir and not os.path.exists(out_dir):
                try:
                    os.makedirs(out_dir)
                except Exception as e:
                    self.dlog.logger.error(
                        "scan_file_main.create_out_dir_error:{0}, please check it!"
                        .format(out_dir),
                        exc_info=True)
                    return

            file_type = option.file_type
            if start_time == 0 and stop_time == 0:
                file_total_seconds = int(
                    ACRCloudRecognizer.get_duration_ms_by_file(filepath) /
                    1000)
                results = self.recognize_file(filepath, start_time,
                                              file_total_seconds, step,
                                              rec_length, with_duration)
            else:
                results = self.recognize_file(filepath, start_time, stop_time,
                                              step, rec_length, with_duration)

            filename_csv = 'result-' + os.path.basename(
                filepath.strip()) + '.csv'
            filename_xlsx = 'result-' + os.path.basename(
                filepath.strip()) + '.xlsx'
            filename_json = 'result-' + os.path.basename(
                filepath.strip()) + '.json'

            if results:
                if file_type == "csv":
                    self.export_to_csv(results, filename_csv, out_dir)
                elif file_type == "json":
                    self.export_to_json(results, filename_json, out_dir)
                else:
                    self.export_to_xlsx(results, filename_xlsx, out_dir)

            if with_duration == 1:
                new_results = []
                if results:
                    new_results = self.apply_filter(results)

                filename_with_duration_csv = 'result-' + os.path.basename(
                    filepath.strip()) + '_with_duration.csv'
                filename_with_duration_xlsx = 'result-' + os.path.basename(
                    filepath.strip()) + '_with_duration.xlsx'
                filename_with_duration_json = 'result-' + os.path.basename(
                    filepath.strip()) + '_with_duration.json'

                if file_type == "csv":
                    self.export_to_csv(new_results, filename_with_duration_csv,
                                       out_dir)
                elif file_type == "json":

                    self.export_to_json(new_results,
                                        filename_with_duration_json, out_dir)
                else:
                    self.export_to_xlsx(new_results,
                                        filename_with_duration_xlsx, out_dir)
        except Exception as e:
            self.dlog.logger.error("scan_file_main.error", exc_info=True)
        return

    def scan_folder_main(self, option, start_time, stop_time):
        try:
            path = option.folder_path
            file_list = os.listdir(path)
            for i in file_list:
                option.file_path = path + '/' + i
                self.scan_file_main(option, start_time, stop_time)
        except Exception as e:
            self.dlog.logger.error("scan_folder_main.error", exc_info=True)
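
For orientation, parse_data() above touches only a handful of fields, so a recognition result shaped like the synthetic dict below is enough to exercise it. The layout is inferred from the accesses in the method; real ACRCloud responses carry many more fields.

# Minimal synthetic recognition result covering what parse_data() reads.
sample = {
    "status": {"code": 0, "msg": "Success"},
    "metadata": {
        "played_duration": 10,
        "music": [{
            "title": "Some Title",
            "play_offset_ms": 12000,
            "acrid": "0123456789abcdef",
            "label": "Some Label",
            "album": {"name": "Some Album"},
            "artists": [{"name": "Some Artist"}],
            "external_ids": {"isrc": "USABC1234567", "upc": "123456789012"},
            "external_metadata": {
                "deezer": {"track": {"id": "123"}},
                "spotify": {"track": {"id": "abc"}},
                "youtube": {"vid": "xyz"},
            },
        }],
        "custom_files": [{"title": "Custom Title", "audio_id": "42"}],
    },
}
# ACRCloud_Scan_Files.parse_data(sample) would flatten this into its
# (title, artists, album, acrid, ...) result tuple.
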
Example #10
class Acrcloud_Rec_Manager:
    def __init__(self, mainqueue, recqueue, resultqueue, shareDict, config):
        self._mainQueue = mainqueue
        self._recQueue = recqueue
        self._resultQueue = resultqueue
        self._shareDict = shareDict
        self._config = config
        self._recognizer = None
        self._workerpool = []
        self.initLog()
        self.initConfig()
        self.initWorkers(self._init_nums)

    def initLog(self):
        self._dlog = AcrcloudLogger("RecManager", logging.INFO)
        if not self._dlog.addFilehandler(logfile="RecWorker.log",
                                         logdir=self._config['log']['dir']):
            self.exitRecM('rec_error#0#init_flog_error')
        if not self._dlog.addStreamHandler():
            self.exitRecM('rec_error#0#init_slog_error')

    def initConfig(self):
        #self._host = self._config['recognize']['host']
        #self._query_type = self._config['recognize']['query_type']
        init_nums_map = {'4core': 20, '8core': 30, '16core': 40, '32core': 60}
        cpu_core = multiprocessing.cpu_count()
        self._init_nums = init_nums_map.get(str(cpu_core) + 'core', 20)
        self._worker_num = 0

        #####################
        ## init recognizer ##
        #####################
        self._recognizer = acrcloud_recognize(self._dlog)
        if not self._recognizer:
            self._dlog.logger.error('init recognize error')
            self.exitRecM('rec_error#1#init_recognize_error')

    def initWorkers(self, new_nums):
        try:
            for i in range(new_nums):
                rechandler = Acrcloud_Rec_Worker(
                    self._worker_num, self._shareDict, self._recognizer,
                    self._recQueue, self._resultQueue, self._dlog.logger)
                rechandler.start()
                self._workerpool.append((self._worker_num, rechandler))
                self._worker_num += 1
            self._dlog.logger.warn(
                'Warn@Acrcloud_Rec_Worker(Num:{0}).Init_Success'.format(
                    new_nums))
        except Exception as e:
            self._dlog.logger.error('Error@Init_Rec_Workers', exc_info=True)
            self.exitRecM('rec_error#3#init_rec_workers_error')

    def delWorkers(self):
        try:
            count = 0
            for id, handler in self._workerpool:
                handler.stop()
                self._worker_num -= 1
                count += 1
            self._dlog.logger.warning(
                'Warn@Del_Rec_Workers_Success.(Total Num:{0})'.format(count))
        except Exception as e:
            self._dlog.logger.error('Error@Del_Rec_Workers', exc_info=True)
            self._mainQueue.put('rec_error#4#del_rec_workers_error')

    def start(self):
        self._running = True
        while 1:
            if not self._running:
                break
            try:
                cmdinfo = self._mainQueue.get()
            except Queue.Empty:
                time.sleep(2)
                continue
            if cmdinfo[0] == 'stop':
                self.stop()

    def stop(self):
        self.delWorkers()
        self._running = False
        self._dlog.logger.warn('Warn@Acrcloud_Recognize_Stop')
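
initConfig() above sizes the worker pool from the CPU count, falling back to 20 workers for core counts missing from the map. The lookup, reduced to a runnable line:

import multiprocessing

init_nums_map = {'4core': 20, '8core': 30, '16core': 40, '32core': 60}
workers = init_nums_map.get('{0}core'.format(multiprocessing.cpu_count()), 20)
print(workers)
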
Example #11
def init_log(self):
    self.dlog = AcrcloudLogger('ACRCloud_ScanF', logging.INFO)
    if not self.dlog.addFilehandler(logfile="log_scan_files.log",
                                    logdir="./",
                                    loglevel=logging.WARN):
        sys.exit(1)
    if not self.dlog.addStreamHandler():
        sys.exit(1)
Example #12
class AcrcloudMonitor:

    def __init__(self, mainQueue, config,
                 shareMonitorDict,
                 shareStatusDict,
                 shareDict,
                 dworker,
                 rworker,
                 sworker):
        self.recQueue = multiprocessing.Queue()
        self.recMainQueue = multiprocessing.Queue()
        self.resultQueue = multiprocessing.Queue()
        self.resMainQueue = multiprocessing.Queue()
        self.springQueue = mainQueue
        self.config = config
        self.shareMonitorDict = shareMonitorDict
        self.shareStatusDict = shareStatusDict
        self.shareDict = shareDict
        self.procDict = dict()
        self.dworker = dworker
        self.rworker = rworker
        self.sworker = sworker
        self.initLog()
        self.initRec()
        self.initRes()

    def initLog(self):
        self.colorfmt = "$MAGENTA%(asctime)s - $RED%(name)-20s$RESET - $COLOR%(levelname)-8s$RESET - $COLOR%(message)s$RESET"
        self.dlog = AcrcloudLogger('Monitor@Main', logging.INFO)
        if not self.dlog.addFilehandler(logfile="Monitor.log",
                                        logdir=self.config["log"]["dir"]):
            sys.exit(1)
        if not self.dlog.addStreamHandler(self.colorfmt):
            sys.exit(1)

    def initRec(self):
        self.recproc = multiprocessing.Process(target=self.rworker,
                                               args=(self.recMainQueue,
                                                     self.recQueue,
                                                     self.resultQueue,
                                                     self.shareDict,
                                                     self.config))
        self.recproc.start()
        if not self.recproc.is_alive():
            self.dlog.logger.error('[email protected]_recognize.failed')
            sys.exit(1)
        else:
            self.dlog.logger.warn('[email protected]_recognize.success')

    def initRes(self):
        self.resproc = multiprocessing.Process(target=self.sworker,
                                               args=(self.resMainQueue,
                                                     self.resultQueue,
                                                     self.config))
        self.resproc.start()
        if not self.resproc.is_alive():
            self.dlog.logger.error('[email protected]_result.failed')
            sys.exit(1)
        else:
            self.dlog.logger.warn('[email protected]_result.success')

    def checkInfo(self, info):
        if len(info) >= 8:
            for key in self.addkeys:
                if info.get(key, 'None') == 'None':
                    return False
            return True
        return False

    def changeStat(self, id, index, msg):
        stat = self.shareStatusDict[id]
        stat[index] = msg
        self.shareStatusDict[id] = stat

    def startMonitor(self):
        try:
            for stream_id in self.shareMonitorDict.keys():
                jsoninfo = self.shareMonitorDict[stream_id][0]
                self.addMonitor(jsoninfo)
                time.sleep(0.5)
        except Exception as e:
            self.dlog.logger.error('Error@Acrcloud_Manager.startMonitor Error:', exc_info=True)

    def addMonitor(self, jsoninfo):
        try:
            stream_id = jsoninfo.get('stream_id')
            if stream_id in self.shareMonitorDict and stream_id not in self.procDict:
                mainqueue = multiprocessing.Queue()
                proc = multiprocessing.Process(target=self.dworker,
                                               args=(jsoninfo, mainqueue,
                                                     self.recQueue,
                                                     self.shareStatusDict,
                                                     self.shareDict,
                                                     self.config))
                proc.daemon = True
                proc.start()
                if proc.is_alive():
                    self.procDict[stream_id] = [proc, mainqueue]
                    self.dlog.logger.warn('ADD Monitor ({0}, {1})'.format(jsoninfo['stream_id'], jsoninfo['stream_url']))
                    return True
        except Exception as e:
            self.dlog.logger.error('[email protected] Monitor Error:', exc_info=True)
        self.dlog.logger.error('[email protected] Monitor Failed ({0}, {1})'.format(jsoninfo['stream_id'], jsoninfo['stream_url']))
        return False

    def delMonitor(self, jsoninfo):
        try:
            stream_id = jsoninfo.get('stream_id')
            proc, mainqueue = self.procDict[stream_id]
            mainqueue.put('STOP')
            proc.join()
            if not proc.is_alive():
                #del self.shareStatusDict[stream_id]
                self.shareStatusDict[stream_id] = ['10#delete', '2#unknow']
                del self.procDict[stream_id]
                self.dlog.logger.warn('DEL Monitor ({0}, {1})'.format(jsoninfo['stream_id'], jsoninfo['stream_url']))
                return True
        except Exception as e:
            self.dlog.logger.error('[email protected] Monitor Error:', exc_info=True)
        self.dlog.logger.error('[email protected] Monitor Failed ({0}, {1})'.format(jsoninfo['stream_id'], jsoninfo['stream_url']))
        return False

    def delAllM(self):
        try:
            for stream_id in self.procDict.keys():
                if self.delMonitor({'stream_id':stream_id, 'stream_url':''}):
                    del self.shareMonitorDict[stream_id]
        except Exception as e:
            self.dlog.logger.error('Del All Monitors Error', exc_info=True)

    def reStart(self, jsoninfo):
        try:
            stream_id = jsoninfo.get('stream_id')
            if stream_id in self.shareMonitorDict:
                code, msg = self.shareStatusDict[stream_id][0].split('#')
                proc, mainqueue = self.procDict[stream_id]
                info, createTime = self.shareMonitorDict[stream_id][:2]
                if code == '4' or code == '3' or code == '6':
                    if proc.is_alive():
                        mainqueue.put('RESTART')
                        self.changeStat(stream_id, 0, '0#restart0')
                        self.dlog.logger.warn('Restart Monitor ({0}, {1}).'.format(jsoninfo['stream_id'], jsoninfo['stream_url']))
                        return True
                    else:
                        proc = multiprocessing.Process(target=self.dworker,
                                                       args=(info, mainqueue,
                                                             self.recQueue,
                                                             self.shareStatusDict,
                                                             self.shareDict,
                                                             self.config))
                        proc.daemon = True
                        proc.start()
                        if proc.is_alive():
                            self.procDict[stream_id][0] = proc
                            self.changeStat(stream_id, 0, '0#restart1')
                            self.dlog.logger.warn('Restart Monitor ({0}, {1}).'.format(jsoninfo['stream_id'], jsoninfo['stream_url']))
                            return True
        except Exception as e:
            self.dlog.logger.error('[email protected] Monitor Error:', exc_info=True)
        self.dlog.logger.error('[email protected] Monitor Failed ({0}, {1}).'.format(jsoninfo['stream_id'], jsoninfo['stream_url']))
        return False

    def refresh(self):
        try:
            for stream_id in self.shareMonitorDict.keys():
                jsoninfo, createTime, value = self.shareMonitorDict[stream_id]
                if value == 1:
                    self.dlog.logger.warn('[email protected]: {0} - Update'.format(jsoninfo.get('stream_id','')))
                    self.delMonitor(jsoninfo)
                    self.addMonitor(jsoninfo)
                elif value == 2:
                    self.dlog.logger.warn('[email protected]: {0} - New Add'.format(jsoninfo.get('stream_id','')))
                    self.addMonitor(jsoninfo)
                elif value == 3:
                    self.dlog.logger.warn('[email protected]: {0} - Delete'.format(jsoninfo.get('stream_id','')))
                    self.delMonitor(jsoninfo)
                    del self.shareMonitorDict[stream_id]
                time.sleep(1)
        except Exception as e:
            self.dlog.logger.error('[email protected] Monitor Error:', exc_info=True)

    def pauseM(self, jsoninfo):
        try:
            stream_id = jsoninfo.get('stream_id')
            if stream_id in self.shareMonitorDict:
                code, msg = self.shareStatusDict[stream_id][0].split('#')
                if code == '0' or code == '1':
                    proc, mainqueue = self.procDict[stream_id]
                    mainqueue.put('PAUSE')
                    self.dlog.logger.warn('PAUSE Monitor ({0}, {1}).'.format(jsoninfo['stream_id'], jsoninfo['stream_url']))
                    return True
        except Exception as e:
            self.dlog.logger.error('[email protected] Monitor Error:', exc_info=True)
        self.dlog.logger.error('[email protected] Monitor Failed ({0}, {1}).'.format(jsoninfo['stream_id'], jsoninfo['stream_url']))
        return False

    def doIt(self, cmd, info):
        try:
            if cmd == 'heiheihei':
                self.startMonitor()
            elif cmd == 'refresh':
                self.refresh()
            elif cmd == 'restart':
                self.reStart(info)
            elif cmd == 'pause':
                self.pauseM(info)
            elif cmd == 'stop':
                self.stop()
        except Exception as e:
            self.dlog.logger.error("doIt Error:", exc_info=True)

    def start(self):
        self._running = True
        while 1:
            if not self._running:
                break
            try:
                cmd, info = self.springQueue.get()
            except Queue.Empty:
                continue
            self.doIt(cmd, info)
            time.sleep(0.1)

    def stop(self):
        self.delAllM()
        self.dlog.logger.warn('Warn@Acrcloud_Manager.DelAllMonitors_Success')
        self.recMainQueue.put(('stop',''))
        self.resMainQueue.put(('stop',''))
        self._running = False
        self.dlog.logger.warn('Warn@Acrcloud_Manager_Stop')
        sys.exit(1)
Example #13
class ACRCloud_Scan_Files:
    def __init__(self, config_file):
        self.config = {
            'host': '',
            'access_key': '',
            'access_secret': '',
            'debug': False,
            'timeout': 10  # seconds
        }
        self.config_file = config_file
        self.init_log()
        self.init_config()

    def init_log(self):
        self.dlog = AcrcloudLogger('ACRCloud_ScanF', logging.INFO)
        if not self.dlog.addFilehandler(logfile="log_scan_files.log",
                                        logdir="./",
                                        loglevel=logging.WARN):
            sys.exit(1)
        if not self.dlog.addStreamHandler():
            sys.exit(1)

    def init_config(self):
        try:
            json_config = None
            with codecs.open(self.config_file, 'r') as f:
                json_config = json.loads(f.read())
            for k in ["host", "access_key", "access_secret"]:
                if k in json_config and json_config[k].strip():
                    self.config[k] = str(json_config[k].strip())
                else:
                    self.dlog.logger.error(
                        "init_config.not found {0} in config.json, please check"
                        .format(k))
                    sys.exit(1)

            self.re_handler = ACRCloudRecognizer(self.config)
            if self.re_handler:
                self.dlog.logger.warn("init_config success!")
        except Exception as e:
            self.dlog.logger.error("init_config.error", exc_info=True)

    def read_file(self, infile, jFirst=True):
        with open(infile, "rb") as rfile:
            for line in rfile:
                if jFirst:
                    jFirst = False
                    continue
                yield line.strip()

    def write_error(self, file_path, error_time, error_detail):
        with open('error_scan.txt', 'a') as f:
            msg = file_path + '||' + str(error_time) + '||' + str(
                error_detail) + '\n'
            f.write(msg)

    def empty_error_scan(self):
        if os.path.exists('error_scan.txt'):
            os.remove('error_scan.txt')

    def export_to_csv(self,
                      result_list,
                      export_filename="ACRCloud_ScanFile_Results.csv",
                      export_dir="./"):
        try:
            results = []
            for item in result_list:
                filename = item["file"]
                timestamp = item["timestamp"]
                jsoninfo = item["result"]
                if "status" in jsoninfo and jsoninfo["status"]["code"] == 0:
                    row = self.parse_data(jsoninfo)
                    row = [filename, timestamp] + list(row)
                    results.append(row)

            export_filepath = os.path.join(export_dir, export_filename)

            with codecs.open(export_filepath, 'w', 'utf-8-sig') as f:
                head_row = [
                    'filename', 'timestamp', 'title', 'artists', 'album',
                    'acrid', 'played_duration', 'label', 'isrc', 'upc',
                    'deezer', 'spotify', 'itunes', 'youtube',
                    'custom_files_title', 'audio_id'
                ]
                dw = csv.writer(f)
                dw.writerow(head_row)
                dw.writerows(results)
                self.dlog.logger.info(
                    "export_to_csv.Save Data to csv: {0}".format(
                        export_filepath))
        except Exception as e:
            self.dlog.logger.error("Error export_to_csv", exc_info=True)

    def parse_data(self, jsoninfo):
        try:
            title, played_duration, isrc, upc, acrid, label, album = [""] * 7
            artists, deezer, spotify, itunes, youtube, custom_files_title, audio_id = [
                ""
            ] * 7

            metadata = jsoninfo.get('metadata', {})
            played_duration = metadata.get("played_duration", "")
            if "music" in metadata and len(metadata["music"]) > 0:
                item = metadata["music"][0]
                title = item.get("title", "")
                offset = item.get("play_offset_ms", "")
                isrc = item.get("external_ids", {"isrc": ""}).get("isrc", "")
                upc = item.get("external_ids", {"upc": ""}).get("upc", "")
                acrid = item.get("acrid", "")
                label = item.get("label", "")
                album = item.get("album", {"name": ""}).get("name", "")
                artists = ",".join([
                    ar["name"] for ar in item.get('artists', [{
                        "name": ""
                    }]) if ar.get("name")
                ])
                deezer = item.get("external_metadata", {
                    "deezer": {
                        "track": {
                            "id": ""
                        }
                    }
                }).get("deezer", {
                    "track": {
                        "id": ""
                    }
                }).get("track", {
                    "id": ""
                }).get("id", "")
                spotify = item.get("external_metadata", {
                    "spotify": {
                        "track": {
                            "id": ""
                        }
                    }
                }).get("spotify", {
                    "track": {
                        "id": ""
                    }
                }).get("track", {
                    "id": ""
                }).get("id", "")
                itunes = item.get("external_metadata", {
                    "itunes": {
                        "track": {
                            "id": ""
                        }
                    }
                }).get("itunes", {
                    "track": {
                        "id": ""
                    }
                }).get("track", {
                    "id": ""
                }).get("id", "")
                youtube = item.get("external_metadata", {
                    "youtube": {
                        "vid": ""
                    }
                }).get("youtube", {
                    "vid": ""
                }).get("vid", "")

            if "custom_files" in metadata and len(
                    metadata["custom_files"]) > 0:
                custom_item = metadata["custom_files"][0]
                custom_files_title = custom_item.get("title", "")
                audio_id = custom_item.get("audio_id", "")
        except Exception as e:
            self.dlog.logger.error(
                "parse_data.error.data:{0}".format(metadata), exc_info=True)

        res = (title, artists, album, acrid, played_duration, label, isrc, upc,
               deezer, spotify, itunes, youtube, custom_files_title, audio_id)
        return res

    def apply_filter(self, results):
        fworker = FilterWorker()
        result_new = fworker.apply_filter(results)
        return result_new

    def do_recognize(self, filepath, start_time, rec_length):
        # Compute the timestamp outside the try block so the error path
        # below can still return it.
        current_time = time.strftime('%d %H:%M:%S',
                                     time.gmtime(start_time))
        try:
            res_data = self.re_handler.recognize_by_file(
                filepath, start_time, rec_length)
            return filepath, current_time, res_data
        except Exception as e:
            self.dlog.logger.error("do_recognize.error.({0}, {1}, {2})".format(
                filepath, start_time, rec_length),
                                   exc_info=True)
        return filepath, current_time, None

    def recognize_file(self,
                       filepath,
                       start_time,
                       stop_time,
                       step,
                       rec_length,
                       with_duration=0):
        self.dlog.logger.warn("scan_file.start_to_run: {0}".format(filepath))

        result = []
        for i in range(start_time, stop_time, step):
            filep, current_time, res_data = self.do_recognize(
                filepath, i, rec_length)
            try:
                jsoninfo = json.loads(res_data)
                code = jsoninfo['status']['code']
                msg = jsoninfo['status']['msg']
                if "status" in jsoninfo and jsoninfo["status"]["code"] == 0:
                    result.append({
                        "timestamp": current_time,
                        "rec_length": rec_length,
                        "result": jsoninfo,
                        "file": filep
                    })
                    res = self.parse_data(jsoninfo)
                    self.dlog.logger.info(
                        'recognize_file.(time:{0}, title: {1})'.format(
                            current_time, res[0]))
                if code == 2005:
                    self.dlog.logger.warn(
                        'recognize_file.(time:{0}, code:{1}, Done!)'.format(
                            current_time, code))
                    break
                elif code == 1001:
                    result.append({
                        "timestamp": current_time,
                        "rec_length": rec_length,
                        "result": jsoninfo,
                        "file": filep
                    })
                    self.dlog.logger.info(
                        "recognize_file.(time:{0}, code:{1}, No_Result)".
                        format(current_time, code))
                elif code == 3001:
                    self.dlog.logger.error(
                        'recognize_file.(time:{0}, code:{1}, Missing/Invalid Access Key)'
                        .format(current_time, code))
                    break
                elif code == 3003:
                    self.dlog.logger.error(
                        'recognize_file.(time:{0}, code:{1}, Limit exceeded)'.
                        format(current_time, code))
                elif code == 3000:
                    self.dlog.logger.error(
                        'recognize_file.(time:{0}, {1}, {2})'.format(
                            current_time, code, msg))
                    self.write_error(filepath, i, 'NETWORK ERROR')
            except Exception as e:
                self.dlog.logger.error('recognize_file.error', exc_info=True)
                self.write_error(filepath, i, 'JSON ERROR')
        return result

    def scan_file_main(self, option, start_time, stop_time):
        try:
            filepath = option.file_path
            step = option.step
            rec_length = option.rec_length
            with_duration = option.with_duration
            out_dir = option.out_dir
            if start_time == 0 and stop_time == 0:
                file_total_seconds = int(
                    ACRCloudRecognizer.get_duration_ms_by_file(filepath) /
                    1000)
                results = self.recognize_file(filepath, start_time,
                                              file_total_seconds, step,
                                              rec_length, with_duration)
            else:
                results = self.recognize_file(filepath, start_time, stop_time,
                                              step, rec_length, with_duration)

            filename = 'result-' + os.path.basename(filepath.strip()) + '.csv'
            fpath = os.path.join(out_dir, filename)
            if os.path.exists(fpath):
                os.remove(fpath)
            if results:
                self.export_to_csv(results, filename, out_dir)

            if with_duration == 1:
                new_results = []
                if results:
                    new_results = self.apply_filter(results)
                filename_with_duration = 'result-' + os.path.basename(
                    filepath.strip()) + '_with_duration.csv'
                self.export_to_csv(new_results, filename_with_duration,
                                   out_dir)
        except Exception as e:
            self.dlog.logger.error("scan_file_main.error", exc_info=True)

    def scan_folder_main(self, option, start_time, stop_time):
        try:
            path = option.folder_path
            file_list = os.listdir(path)
            for i in file_list:
                option.file_path = os.path.join(path, i)
                self.scan_file_main(option, start_time, stop_time)
        except Exception as e:
            self.dlog.logger.error("scan_folder_main.error", exc_info=True)
class AcrcloudWorker:
    def __init__(self, info, mainqueue, recqueue, shareStatusDict, shareDict,
                 config):
        self._info = info
        self._downloadFun = None
        self._config = config
        self._mainqueue = mainqueue
        self._recqueue = recqueue
        self._shareStatusDict = shareStatusDict
        self._shareDict = shareDict
        self._workQueue = Queue.Queue()
        self._download_cmdQueue = Queue.Queue()
        self._downloadHandler = None
        self._collectHandler = None
        self._stream_id = str(info.get('stream_id', ''))
        self.initLog()
        self.initConfig(info)
        self.isFirst = True

    def initLog(self):
        self._dlog = AcrcloudLogger("SWorker_{0}.log".format(self._stream_id),
                                    logging.INFO)
        if not self._dlog.addFilehandler(logfile="SWorker_{0}.log".format(
                self._stream_id),
                                         logdir=self._config['log']['dir']):
            sys.exit(1)
        if not self._dlog.addStreamHandler():
            sys.exit(1)

    def initConfig(self, info):
        try:
            self._dlog.logger.info('initConfig start...')

            self._rec_host = str(info.get('rec_host', ''))
            self._access_key = str(info.get('access_key', ''))
            self._access_secret = str(info.get('access_secret', ''))
            self._stream_url = str(info.get('stream_url', ''))
            self._monitor_interval = info.get('interval', 5)
            self._monitor_length = info.get('monitor_length', 20)
            self._monitor_timeout = info.get('monitor_timeout', 30)
            self._timeout_Threshold = 20  #self._config["server"]["timeout_Threshold"]
            self._rec_timeout = info.get('rec_timeout', 5)
            self.baseRebornTime = 20  #self._config["server"]["reborn_Time_Sec"]
            self.rebornTime = 0
            self.deadThreshold = 20  #self._config["server"]["dead_Threshold"]
            self.isFirst = True

            if self._monitor_timeout <= self._monitor_interval + self._monitor_length:
                self._monitor_timeout = self._monitor_interval + self._monitor_length + 2
            self._downloadFun = acrcloud_download
            if not self._downloadFun:
                self._dlog.logger.error('init downloadFunc error')
                self.changeStat(0, "8#error@downloadfun_init")
                sys.exit(1)
        except Exception as e:
            self._dlog.logger.error('Error@initConfig',
                                    exc_info=True)
            sys.exit(1)

    def changeStat(self, index, msg):
        stat = self._shareStatusDict[self._stream_id]
        stat[index] = msg
        self._shareStatusDict[self._stream_id] = stat

    def newStart(self):

        self._collectHandler = Worker_CollectData(
            self._rec_host, self._stream_id, self._stream_url,
            self._access_key, self._access_secret, self._workQueue,
            self._recqueue, self._shareDict, self._dlog.logger,
            self._monitor_length, self._monitor_interval,
            self._monitor_timeout, self._timeout_Threshold)
        self._collectHandler.start()

        self._downloadHandler = Worker_DownloadStream(
            self._stream_url, self._workQueue, self._download_cmdQueue,
            self._downloadFun, self._dlog.logger, self._monitor_timeout,
            self._timeout_Threshold, self.isFirst)
        self.isFirst = False
        self._downloadHandler.start()

    def nowStop(self):
        self._downloadHandler.stop()
        self._collectHandler.stop()

    def deal_mainCMD(self, recv):
        isbreak = False
        if recv == 'STOP':
            self.nowStop()
            self._dlog.logger.warn("mainQueue receive 'STOP' & JUST STOP")
            isbreak = True
        elif recv == 'PAUSE':
            self.pauseflag = True
            self.nowStop()
            self._dlog.logger.warn("mainQueue receive 'PAUSE' & JUST PAUSE")
            self.changeStat(0, "4#pause")
        elif recv == 'RESTART':
            self.nowStop()
            self.newStart()
            self.pauseflag = False
        return isbreak

    def deal_workerCMD(self, recv_thread):
        isbreak = False
        if recv_thread.startswith("STATUS"):
            status = recv_thread.split("#")
            self.changeStat(0, recv_thread[len('STATUS#'):])
            if status[1] == '2':
                self._dlog.logger.warn("cmdQueue receive 'DEAD' & JUST SLEEP")
                self.nowStop()
                self.deadcount += 1
                self.rebornTime = self.baseRebornTime * self.deadcount
                self.deadflag = True
                self.deadTime = datetime.datetime.now()
                if self.deadcount >= self.deadThreshold:
                    self.killedcount += 1
                    self.killedflag = True
                    self.deadflag = False
                    self.deadcount = 0
                    self.changeStat(0, "3#killed")
                    self._dlog.logger.error(
                        "Dead Count Reach Threshold({0}), Monitor will killed".
                        format(self.deadThreshold))
                    self.killedTime = datetime.datetime.now()
            elif status[1] == '3':
                pass
            elif status[1] == '6':
                self._dlog.logger.error(
                    "Invalid Stream_Url, This Monitor will killed")
                self.nowStop()
                self.invalid_url_flag = True
                self.invalid_url_time = datetime.datetime.now()
                self.deadflag = False
                self.deadcount = 0
                self.killedflag = False
                self.killedcount = 0
            elif status[1] == '0':
                self.deadcount = 0
                self.killedcount = 0
        elif recv_thread.startswith("ISVIDEO"):
            self.changeStat(1, recv_thread[len('ISVIDEO#'):])

    def start(self):
        self.newStart()
        self.deadTime = None
        self.deadflag = False
        self.deadcount = 0
        self.pauseflag = False
        self.killedTime = None
        self.killedflag = False
        self.killedcount = 0
        self.killed_reborn_hours = 1
        self.invalid_url_flag = False
        self.invalid_url_time = None
        self.invalid_url_rebornTime = 2 * 60 * 60  #2 hours
        while 1:
            recv = ''
            recv_thread = ''
            if self.invalid_url_flag:
                invalidpassTime = (datetime.datetime.now() -
                                   self.invalid_url_time).seconds
                if invalidpassTime % (10 * 60) == 0:
                    self._dlog.logger.warn(
                        "Invalid URL Worker Restart Time: {0}s/{1}s".format(
                            invalidpassTime, self.invalid_url_rebornTime))
                if invalidpassTime >= self.invalid_url_rebornTime:
                    self._dlog.logger.warn("Invalid URL Try Restart...")
                    self.newStart()
                    self.invalid_url_time = None
                    self.invalid_url_flag = False

            if self.deadflag:
                passTime = (datetime.datetime.now() - self.deadTime).seconds
                if passTime % 10 == 0:
                    self._dlog.logger.warn(
                        "Worker Reborn Time: {0}s/{1}s".format(
                            passTime, self.rebornTime))
                if passTime >= self.rebornTime:
                    self._dlog.logger.warn("Worker Reborn...")
                    self.newStart()
                    self.deadTime = None
                    self.deadflag = False

            if self.killedflag:
                killedpassTime = (datetime.datetime.now() -
                                  self.killedTime).seconds
                if 1 <= self.killedcount < 6:
                    self.killed_reborn_hours = pow(2, self.killedcount - 1)
                elif self.killedcount >= 6:
                    self.killed_reborn_hours = pow(2, 5)
                else:
                    self.killed_reborn_hours = 1
                if killedpassTime % 1000 == 0:
                    self._dlog.logger.warn(
                        "Killed Worker Reborn Time: {0}/{1} (hours)".format(
                            round(killedpassTime / 3600.0, 2),
                            self.killed_reborn_hours))
                if killedpassTime >= self.killed_reborn_hours * 3600:
                    self._dlog.logger.warn("Killed Worker Reborn...")
                    self.newStart()
                    self.killedTime = None
                    self.killedflag = False
            try:
                recv = self._mainqueue.get(block=False)
            except Queue.Empty:
                time.sleep(0.5)
            if self.deal_mainCMD(recv):
                break

            try:
                recv_thread = self._download_cmdQueue.get(block=False)
            except Queue.Empty:
                time.sleep(0.5)
            if self.deal_workerCMD(recv_thread):
                break
Exemple #15
0
class Acrcloud_Rec_Manager:
    def __init__(self, mainqueue, recqueue, resultqueue, shareDict, config):
        self._mainQueue = mainqueue
        self._recQueue = recqueue
        self._resultQueue = resultqueue
        self._shareDict = shareDict
        self._config = config
        self._rec_pool = []
        self.initLog()

        self.stream_assign_index = 0
        self.stream_assign_map = {}

        self._init_pool_num = 3
        self.initPoolWorkers()

    def exitRecM(self, msg):
        print(msg)
        sys.exit(1)

    def initLog(self):
        self._dlog = AcrcloudLogger("RecManager", logging.INFO)
        if not self._dlog.addFilehandler(logfile="RecManager.log",
                                         logdir=self._config['log']['dir']):
            self.exitRecM('rec_error#0#init_flog_error')
        if not self._dlog.addStreamHandler():
            self.exitRecM('rec_error#0#init_slog_error')

    def initPoolWorkers(self):
        try:
            for i in range(self._init_pool_num):
                tmp_poolQueue = multiprocessing.Queue()
                pool_proc = multiprocessing.Process(
                    target=poolWorker,
                    args=(i, tmp_poolQueue, self._resultQueue, self._shareDict,
                          self._config))
                pool_proc.daemon = True
                pool_proc.start()
                if not pool_proc.is_alive():
                    self._dlog.logger.error(
                        'Error@initPoolWorkers.init_rec_pool:{0}.failed'.
                        format(i))
                    sys.exit(1)
                else:
                    self._dlog.logger.warn(
                        'Warn@initPoolWorkers.init_rec_pool:{0}.success'.
                        format(i))

                self._rec_pool.append((i, tmp_poolQueue, pool_proc))
            self._dlog.logger.warn(
                'Warn@initPoolWorkers.Init_Success.(Total Num:{0})'.format(
                    self._init_pool_num))
        except Exception as e:
            self._dlog.logger.error('Error@initPoolWorkers', exc_info=True)
            self.exitRecM('rec_error#3#init_rec_workers_error')

    def delPoolWorkers(self):
        try:
            for id, pool_queue, pool_proc in self._rec_pool:
                pool_queue.put(('cmd', 'stop'))
                self._dlog.logger.warning(
                    'Warn@Del_Rec_PoolWorkers.send_stop_cmd_to_pool.(pool_id:{0})'
                    .format(id))
        except Exception as e:
            self._dlog.logger.error('Error@Del_Rec_PoolWorkers', exc_info=True)

    def addTask(self, recinfo):
        try:
            stream_id = recinfo[1]
            if stream_id not in self.stream_assign_map:
                tmp_index = self.stream_assign_index % self._init_pool_num
                self.stream_assign_map[stream_id] = tmp_index
                self.stream_assign_index += 1

            pool_index = self.stream_assign_map[stream_id]
            pool_queue = self._rec_pool[pool_index][1]
            pool_queue.put(('rec', recinfo))
            if random.random() < 0.1:
                self._dlog.logger.warn(
                    "[email protected]:{0}, PoolID:{1}, PoolQSize:{2}".
                    format(self._recQueue.qsize(), pool_index,
                           pool_queue.qsize()))
        except Exception as e:
            self._dlog.logger.error('Error@addTask', exc_info=True)

    def start(self):
        self._running = True
        while 1:
            if not self._running:
                break
            try:
                cmdinfo = self._mainQueue.get(block=False)
                if cmdinfo[0] == 'stop':
                    self.stop()
            except Queue.Empty:
                time.sleep(0.01)
            try:
                recinfo = self._recQueue.get(block=False)
                self.addTask(recinfo)
            except Queue.Empty:
                time.sleep(0.01)

    def stop(self):
        self.delPoolWorkers()
        self._running = False
        self._dlog.logger.warn('Warn@Acrcloud_Recognize_Manager_Stop')
        sys.exit(1)
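
The addTask routing above pins each stream_id to one pool worker through a simple round-robin counter; once assigned, a stream always lands on the same pool. A standalone sketch of that mapping (names are illustrative):

def assign_pools(stream_ids, pool_num=3):
    # Mirrors Acrcloud_Rec_Manager: first come, first pinned, modulo pool count.
    assign_map, next_index = {}, 0
    for sid in stream_ids:
        if sid not in assign_map:
            assign_map[sid] = next_index % pool_num
            next_index += 1
    return assign_map

# assign_pools(['s1', 's2', 's3', 's4']) -> {'s1': 0, 's2': 1, 's3': 2, 's4': 0}
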
Exemple #16
0
class Acrcloud_Rec_Pool:
    def __init__(self, rec_pool_id, poolqueue, resultqueue, shareDict, config):
        self._rec_pool_id = rec_pool_id
        self._poolQueue = poolqueue
        self._resultQueue = resultqueue
        self._shareDict = shareDict
        self._config = config
        self._recognizer = None
        self._workerpool = []
        self._taskQueue = Queue.Queue()  # manager puts received audio here for the rec workers

        self.initLog()
        self.initConfig()
        self.initWorkers(self._init_nums)
        self._dlog.logger.warn("Rec Pool Init Success, pool_id:{0}".format(
            self._rec_pool_id))

    def exitRecM(self, msg):
        print(msg)
        sys.exit(1)

    def initLog(self):
        self._dlog = AcrcloudLogger("RecPool_{0}".format(self._rec_pool_id),
                                    logging.INFO)
        if not self._dlog.addFilehandler(logfile="RecPool_{0}.log".format(
                self._rec_pool_id),
                                         logdir=self._config['log']['dir']):
            self.exitRecM(
                'rec_error#0#init_flog_error, rec_pool_id:{0}'.format(
                    self._rec_pool_id))
        if not self._dlog.addStreamHandler():
            self.exitRecM(
                'rec_error#0#init_slog_error, rec_pool_id:{0}'.format(
                    self._rec_pool_id))

    def initConfig(self):
        #self._host = self._config['recognize']['host']
        #self._query_type = self._config['recognize']['query_type']
        init_nums_map = {'4core': 20, '8core': 30, '16core': 40, '32core': 60}
        cpu_core = multiprocessing.cpu_count()
        self._init_nums = init_nums_map.get(str(cpu_core) + 'core', 30)
        self._worker_num = 0

        #####################
        ## init recognizer ##
        #####################
        self._recognizer = acrcloud_recognize(self._dlog)
        if not self._recognizer:
            self._dlog.logger.error('init recognize error')
            self.exitRecM('rec_error#1#init_recognize_error')

    def initWorkers(self, new_nums):
        try:
            for i in range(new_nums):
                rechandler = Acrcloud_Rec_Worker(
                    self._rec_pool_id, self._worker_num, self._shareDict,
                    self._recognizer, self._taskQueue, self._resultQueue,
                    self._dlog.logger)
                rechandler.start()
                self._workerpool.append((self._worker_num, rechandler))
                self._worker_num += 1
            self._dlog.logger.warn(
                'Warn@PoolID:{0}.initWorkers.Init_Success.(Num:{1})'.format(
                    self._rec_pool_id, new_nums))
        except Exception as e:
            self._dlog.logger.error('Error@initWorkers', exc_info=True)
            self.exitRecM('rec_error#3#init_rec_pool_workers_error')

    def delWorkers(self):
        try:
            count = 0
            for id, handler in self._workerpool:
                handler.stop()
                self._worker_num -= 1
                count += 1
            self._dlog.logger.warning(
                'Warn@Del_Rec_Pool_Workers_Success.(Total Num:{0})'.format(
                    count))
        except Exception as e:
            self._dlog.logger.error('Error@Del_Rec_Pool_Workers',
                                    exc_info=True)

    def addTask(self, recinfo):
        try:
            self._taskQueue.put(recinfo)
            if random.random() < 0.1:
                self._dlog.logger.warn(
                    "[email protected]:{0}, TaskQSize:{1}".format(
                        self._poolQueue.qsize(), self._taskQueue.qsize()))
        except Exception as e:
            self._dlog.logger.error('Error@addTask', exc_info=True)

    def start(self):
        self._running = True
        while 1:
            if not self._running:
                break
            try:
                itype, recinfo = self._poolQueue.get()
                if itype == "cmd" and recinfo == 'stop':
                    self.stop()
                else:
                    self.addTask(recinfo)
            except Queue.Empty:
                pass

    def stop(self):
        self.delWorkers()
        self._running = False
        self._dlog.logger.warn('Warn@Acrcloud_Recognize_Pool_Stop')
        sys.exit(1)
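
initConfig above sizes each pool from the machine's CPU count through a small lookup table, defaulting to 30 workers for core counts that are not listed. The same logic, isolated:

import multiprocessing

def pool_size(cpu_core=None):
    # Lookup used by Acrcloud_Rec_Pool.initConfig; unknown core counts -> 30.
    init_nums_map = {'4core': 20, '8core': 30, '16core': 40, '32core': 60}
    cpu_core = cpu_core or multiprocessing.cpu_count()
    return init_nums_map.get(str(cpu_core) + 'core', 30)
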
class AcrcloudManager:
    def __init__(self, springboard):
        self.monitor = springboard
        self.sockNum = 0
        self.sockIndex = 1
        self.client2id = {}
        self.id2client = {}
        self.initLog()

    def initLog(self):
        self.colorfmt = "$MAGENTA%(asctime)s$RESET - $RED%(name)-20s$RESET - $COLOR%(levelname)-8s$RESET - $COLOR%(message)s$RESET"
        self.dlog = AcrcloudLogger('Client@Main', logging.INFO)
        if not self.dlog.addStreamHandler(self.colorfmt):
            sys.exit(1)

    def addClient(self, client):
        self.sockNum = self.sockNum + 1
        self.client2id[client] = self.sockIndex
        self.id2client[self.sockIndex] = client
        self.dlog.logger.info('New Client, ID: {0}'.format(self.sockIndex))
        self.sockIndex = self.sockIndex + 1

    def delClient(self, client):
        if client in self.client2id:
            self.sockNum = self.sockNum - 1
            _sockid = self.client2id[client]
            del self.client2id[client]
            del self.id2client[_sockid]
            self.dlog.logger.info('Close Client, ID: {0}'.format(_sockid))

    def getSockid(self, client):
        if client in self.client2id:
            return self.client2id[client]
        else:
            return None

    def getClient(self, sockid):
        if sockid in self.id2client:
            return self.id2client[sockid]
        else:
            return None

    def recData(self, recdata):
        datainfo = recdata[:-2].split('\r\n', 1)
        if len(datainfo) == 2:
            cmd_info, data_block = datainfo
            cmd_info = cmd_info.split()
            if len(cmd_info) != 5:
                return 'ERROR'
            if cmd_info[0] == 'set':
                ret = ''
                if cmd_info[1] == 'restart':
                    ret = self.monitor.reStart(data_block)
                elif cmd_info[1] == 'refresh':
                    ret = self.monitor.reFresh()
                elif cmd_info[1] == 'pause':
                    ret = self.monitor.pauseM(data_block)
                elif cmd_info[1] == 'stop':
                    ret = self.monitor.stop()
                else:
                    ret = "NOT_STORED"
                return ret
            else:
                return "ERROR"
        elif len(datainfo) == 1:
            cmd_info = datainfo[0].split()
            if cmd_info[0] == 'get':
                ret = ''
                if cmd_info[1].startswith('state:'):
                    id = cmd_info[1].split(':')[1]
                    sd = self.monitor.getStat(id.strip())
                    return 'VALUE {0} 0 {1}\r\n{2}'.format(cmd_info[1], len(sd), sd)
                else:
                    return "END"
            else:
                return 'ERROR'
        else:
            return "ERROR"
 def initLog(self):
     self.colorfmt = "$MAGENTA%(asctime)s$RESET - $RED%(name)-20s$RESET - $COLOR%(levelname)-8s$RESET - $COLOR%(message)s$RESET"
     self.dlog = AcrcloudLogger('Client@Main', logging.INFO)
     if not self.dlog.addStreamHandler(self.colorfmt):
         sys.exit(1)
class Acrcloud_Rec_Manager:

    def __init__(self, mainqueue, recqueue, resultqueue, shareDict, config):
        self._mainQueue = mainqueue
        self._recQueue = recqueue
        self._resultQueue = resultqueue
        self._shareDict = shareDict
        self._config = config
        self._recognizer = None
        self._workerpool = []
        self.initLog()
        self.initConfig()
        self.initWorkers(self._init_nums)

    def initLog(self):
        self._dlog = AcrcloudLogger("RecManager", logging.INFO)
        if not self._dlog.addFilehandler(logfile = "RecWorker.log", logdir = self._config['log']['dir']):
            self.exitRecM('rec_error#0#init_flog_error')
        if not self._dlog.addStreamHandler():
            self.exitRecM('rec_error#0#init_slog_error')
            
    def initConfig(self):
        #self._host = self._config['recognize']['host']
        #self._query_type = self._config['recognize']['query_type']
        init_nums_map = {'4core':20, '8core':30, '16core':40, '32core':60}
        cpu_core = multiprocessing.cpu_count()
        self._init_nums = init_nums_map.get(str(cpu_core)+'core', 20)
        self._worker_num = 0
        
        #####################
        ## init recognizer ##
        #####################
        self._recognizer = acrcloud_recognize(self._dlog)
        if not self._recognizer:
            self._dlog.logger.error('init recognize error')
            self.exitRecM('rec_error#1#init_recognize_error')
            
    def initWorkers(self, new_nums):
        try:
            for i in range(new_nums):
                rechandler = Acrcloud_Rec_Worker(self._worker_num,
                                                 self._shareDict,
                                                 self._recognizer,
                                                 self._recQueue,
                                                 self._resultQueue,
                                                 self._dlog.logger)
                rechandler.start()
                self._workerpool.append((self._worker_num, rechandler))
                self._worker_num += 1
            self._dlog.logger.warn('Warn@Acrcloud_Rec_Worker(Num:{0}).Init_Success'.format(new_nums))
        except Exception as e:
            self._dlog.logger.error('Error@Init_Rec_Workers', exc_info=True)
            self.exitRecM('rec_error#3#init_rec_workers_error')

    def delWorkers(self):
        try:
            count = 0
            for id, handler in self._workerpool:
                handler.stop()
                self._worker_num -= 1
                count += 1
            self._dlog.logger.warning('Warn@Del_Rec_Workers_Success.(Total Num:{0})'.format(count))
        except Exception as e:
            self._dlog.logger.error('Error@Del_Rec_Workers', exc_info=True)
            self._mainQueue.put('rec_error#4#del_rec_workers_error')

    def start(self):
        self._running = True
        while 1:
            if not self._running:
                break
            try:
                cmdinfo = self._mainQueue.get(block=False)
                if cmdinfo[0] == 'stop':
                    self.stop()
            except Queue.Empty:
                time.sleep(2)
            
    def stop(self):
        self.delWorkers()
        self._running = False
        self._dlog.logger.warn('Warn@Acrcloud_Recognize_Stop')
class AcrcloudWorker:

    def __init__(self, info, mainqueue, recqueue, shareStatusDict, shareDict, config):
        self._info = info
        self._downloadFun = None
        self._config = config
        self._mainqueue = mainqueue
        self._recqueue = recqueue
        self._shareStatusDict = shareStatusDict
        self._shareDict = shareDict
        self._workQueue = Queue.Queue()
        self._download_cmdQueue = Queue.Queue()
        self._downloadHandler = None
        self._collectHandler = None
        self._stream_id = str(info.get('stream_id', ''))
        self.initLog()
        self.initConfig(info)
        self.isFirst = True

    def initLog(self):
        self._dlog = AcrcloudLogger("SWorker_{0}.log".format(self._stream_id), logging.INFO)
        if not self._dlog.addFilehandler(logfile = "SWorker_{0}.log".format(self._stream_id), logdir = self._config['log']['dir']):
            sys.exit(1)
        if not self._dlog.addStreamHandler():
            sys.exit(1)

    def initConfig(self, info):
        try:
            self._dlog.logger.info('initConfig start...')

            self._rec_host = str(info.get('rec_host', ''))
            self._access_key = str(info.get('access_key', ''))
            self._access_secret = str(info.get('access_secret', ''))
            self._stream_url = str(info.get('stream_url', ''))
            self._monitor_interval = info.get('interval', 5)
            self._monitor_length = info.get('monitor_length', 20)
            self._monitor_timeout = info.get('monitor_timeout', 30)
            self._timeout_Threshold = 20 #self._config["server"]["timeout_Threshold"]
            self._rec_timeout = info.get('rec_timeout', 5)
            self.baseRebornTime = 20 #self._config["server"]["reborn_Time_Sec"]
            self.rebornTime = 0
            self.deadThreshold = 20 #self._config["server"]["dead_Threshold"]
            self.isFirst = True

            if self._monitor_timeout <= self._monitor_interval + self._monitor_length:
                self._monitor_timeout = self._monitor_interval + self._monitor_length + 2
            self._downloadFun = acrcloud_download
            if not self._downloadFun:
                self._dlog.logger.error('init downloadFunc error')
                self.changeStat(0, "8#error@downloadfun_init")
                sys.exit(1)
        except Exception as e:
            self._dlog.logger.error('Error@initConfig', exc_info=True)
            sys.exit(1)

    def changeStat(self, index, msg):
        stat = self._shareStatusDict[self._stream_id]
        stat[index] = msg
        self._shareStatusDict[self._stream_id] = stat

    def newStart(self):
        self._collectHandler = Worker_CollectData(self._rec_host,
                                                  self._stream_id,
                                                  self._stream_url,
                                                  self._access_key,
                                                  self._access_secret,
                                                  self._workQueue,
                                                  self._recqueue,
                                                  self._shareDict,
                                                  self._dlog.logger,
                                                  self._monitor_length,
                                                  self._monitor_interval,
                                                  self._monitor_timeout,
                                                  self._timeout_Threshold)
        self._collectHandler.start()

        self._downloadHandler = Worker_DownloadStream(self._stream_url,
                                                      self._workQueue,
                                                      self._download_cmdQueue,
                                                      self._downloadFun,
                                                      self._dlog.logger,
                                                      self._monitor_timeout,
                                                      self._timeout_Threshold,
                                                      self.isFirst)
        self.isFirst = False
        self._downloadHandler.start()


    def nowStop(self):
        self._downloadHandler.stop()
        self._collectHandler.stop()

    def deal_mainCMD(self, recv):
        isbreak = False
        if  recv == 'STOP':
            self.nowStop()
            self._dlog.logger.warn("mainQueue receive 'STOP' & JUST STOP")
            isbreak = True
        elif recv == 'PAUSE':
            self.pauseflag = True
            self.nowStop()
            self._dlog.logger.warn("mainQueue receive 'PAUSE' & JUST PAUSE")
            self.changeStat(0, "4#pause")
        elif recv == 'RESTART':
            self.nowStop()
            self.newStart()
            self.pauseflag = False
        return isbreak

    def deal_workerCMD(self, recv_thread):
        isbreak = False
        if recv_thread.startswith("STATUS"):
            status = recv_thread.split("#")
            self.changeStat(0, recv_thread[len('STATUS#'):])
            if status[1] == '2':
                self._dlog.logger.warn("cmdQueue receive 'DEAD' & JUST SLEEP")
                self.nowStop()
                self.deadcount += 1
                self.rebornTime = self.baseRebornTime * self.deadcount
                self.deadflag = True
                self.deadTime = datetime.datetime.now()
                if self.deadcount >= self.deadThreshold:
                    self.killedcount += 1
                    self.killedflag = True
                    self.deadflag = False
                    self.deadcount = 0
                    self.changeStat(0, "3#killed")
                    self._dlog.logger.error("Dead Count Reach Threshold({0}), Monitor will killed".format(self.deadThreshold))
                    self.killedTime = datetime.datetime.now()
            elif status[1] == '3':
                pass
            elif status[1] == '6':
                self._dlog.logger.error("Invalid Stream_Url, This Monitor will killed")
                self.nowStop()
                self.invalid_url_flag = True
                self.invalid_url_time = datetime.datetime.now()
                self.deadflag = False
                self.deadcount = 0
                self.killedflag = False
                self.killedcount = 0
            elif status[1] == '0':
                self.deadcount = 0
                self.killedcount = 0
        elif recv_thread.startswith("ISVIDEO"):
            self.changeStat(1, recv_thread[len('ISVIDEO#'):])

    def start(self):
        self.newStart()
        self.deadTime = None
        self.deadflag = False
        self.deadcount = 0
        self.pauseflag = False
        self.killedTime = None
        self.killedflag = False
        self.killedcount = 0
        self.killed_reborn_hours = 1
        self.invalid_url_flag = False
        self.invalid_url_time = None
        self.invalid_url_rebornTime = 2*60*60 #2 hours
        while 1:
            recv = ''
            recv_thread = ''
            if self.invalid_url_flag:
                invalidpassTime = (datetime.datetime.now() - self.invalid_url_time).seconds
                if invalidpassTime % (10*60) == 0:
                    self._dlog.logger.warn("Invalid URL Worker Restart Time: {0}s/{1}s".format(invalidpassTime,
                                                                                              self.invalid_url_rebornTime))
                if invalidpassTime >= self.invalid_url_rebornTime:
                    self._dlog.logger.warn("Invalid URL Try Restart...")
                    self.newStart()
                    self.invalid_url_time = None
                    self.invalid_url_flag = False

            if self.deadflag:
                passTime = (datetime.datetime.now() - self.deadTime).seconds
                if passTime % 10 == 0:
                    self._dlog.logger.warn("Worker Reborn Time: {0}s/{1}s".format(passTime, self.rebornTime))
                if passTime >= self.rebornTime:
                    self._dlog.logger.warn("Worker Reborn...")
                    self.newStart()
                    self.deadTime = None
                    self.deadflag = False

            if self.killedflag:
                killedpassTime = (datetime.datetime.now() - self.killedTime).seconds
                if 1 <= self.killedcount < 6:
                    self.killed_reborn_hours = pow(2, self.killedcount-1)
                elif self.killedcount >= 6:
                    self.killed_reborn_hours = pow(2, 5)
                else:
                    self.killed_reborn_hours = 1
                if killedpassTime % 1000 == 0:
                    self._dlog.logger.warn("Killed Worker Reborn Time: {0}/{1} (hours)".format(round(killedpassTime/3600.0, 2), self.killed_reborn_hours))
                if  killedpassTime >= self.killed_reborn_hours*3600:
                    self._dlog.logger.warn("Killed Worker Reborn...")
                    self.newStart()
                    self.killedTime = None
                    self.killedflag = False
            try:
                recv = self._mainqueue.get(block=False)
            except Queue.Empty:
                time.sleep(0.5)
            if self.deal_mainCMD(recv):
                break

            try:
                recv_thread = self._download_cmdQueue.get(block=False)
            except Queue.Empty:
                time.sleep(0.5)
            if self.deal_workerCMD(recv_thread):
                break
            time.sleep(0.1)
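
The killed-worker logic above doubles the reborn delay with each kill and caps it at pow(2, 5) = 32 hours. The schedule, extracted for clarity:

def killed_reborn_hours(killedcount):
    # Backoff used in AcrcloudWorker.start for repeatedly killed monitors.
    if 1 <= killedcount < 6:
        return pow(2, killedcount - 1)  # 1, 2, 4, 8, 16 hours
    elif killedcount >= 6:
        return pow(2, 5)                # capped at 32 hours
    return 1

# [killed_reborn_hours(n) for n in range(1, 8)] -> [1, 2, 4, 8, 16, 32, 32]
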
class ACRCloud_Scan_Files:

    def __init__(self, config_file):
        self.config = {
            'host': '',
            'access_key': '',
            'access_secret': '',
            'debug': False,
            'timeout': 10  # seconds
        }
        self.config_file = config_file
        self.init_log()
        self.init_config()

    def init_log(self):
        self.dlog = AcrcloudLogger('ACRCloud_ScanF', logging.INFO)
        if not self.dlog.addFilehandler(logfile = "log_scan_files.log", logdir = "./", loglevel = logging.WARN):
            sys.exit(1)
        if not self.dlog.addStreamHandler():
            sys.exit(1)

    def init_config(self):
        try:
            json_config = None
            with codecs.open(self.config_file, 'r') as f:
                json_config = json.loads(f.read())
            for k in ["host", "access_key", "access_secret"]:
                if k in json_config and json_config[k].strip():
                    self.config[k] = str(json_config[k].strip())
                else:
                    self.dlog.logger.error("init_config.not found {0} from config.json, pls check".format(k))
                    sys.exit(1)

            self.re_handler = ACRCloudRecognizer(self.config)
            if self.re_handler:
                self.dlog.logger.warn("init_config success!")
        except Exception as e:
            self.dlog.logger.error("init_config.error", exc_info=True)

    def read_file(self, infile, jFirst=True):
        with open(infile, "rb") as rfile:
            for line in rfile:
                if jFirst:
                    jFirst = False
                    continue
                yield line.strip()

    def write_error(self, file_path, error_time, error_detail):
        with open('error_scan.txt', 'a') as f:
            msg = file_path + '||' + str(error_time) + '||' + str(error_detail) + '\n'
            f.write(msg)

    def empty_error_scan(self):
        if os.path.exists('error_scan.txt'):
            os.remove('error_scan.txt')

    def export_to_csv(self, result_list, export_filename="ACRCloud_ScanFile_Results.csv", export_dir="./"):
        try:
            results = []
            for item in result_list:
                filename = item["file"]
                timestamp = item["timestamp"]
                jsoninfo = item["result"]
                if "status" in jsoninfo and jsoninfo["status"]["code"] == 0:
                    row = self.parse_data(jsoninfo)
                    row = [filename, timestamp] + list(row)
                    results.append(row)

            export_filepath = os.path.join(export_dir, export_filename)

            with codecs.open(export_filepath, 'w', 'utf-8-sig') as f:
                head_row = ['filename', 'timestamp', 'title', 'artists', 'album', 'acrid', 'played_duration', 'label',
                            'isrc', 'upc', 'deezer', 'spotify', 'itunes', 'youtube', 'custom_files_title', 'audio_id']
                dw = csv.writer(f)
                dw.writerow(head_row)
                dw.writerows(results)
                self.dlog.logger.info("export_to_csv.Save Data to csv: {0}".format(export_filepath))
        except Exception as e:
            self.dlog.logger.error("Error export_to_csv", exc_info=True)

    def parse_data(self, jsoninfo):
        try:
            title, played_duration, isrc, upc, acrid, label, album = [""]*7
            artists, deezer, spotify, itunes, youtube, custom_files_title, audio_id  = [""]*7

            metadata = jsoninfo.get('metadata', {})
            played_duration = metadata.get("played_duration", "")
            if "music" in metadata and len(metadata["music"]) > 0:
                item = metadata["music"][0]
                title = item.get("title", "")
                offset = item.get("play_offset_ms", "")
                isrc = item.get("external_ids", {"isrc":""}).get("isrc","")
                upc = item.get("external_ids", {"upc":""}).get("upc","")
                acrid = item.get("acrid","")
                label = item.get("label", "")
                album = item.get("album", {"name":""}).get("name", "")
                artists =  ",".join([ ar["name"] for ar in item.get('artists', [{"name":""}]) if ar.get("name") ])
                deezer = item.get("external_metadata", {"deezer":{"track":{"id":""}}}).get("deezer", {"track":{"id":""}}).get("track", {"id":""}).get("id", "")
                spotify = item.get("external_metadata", {"spotify":{"track":{"id":""}}}).get("spotify", {"track":{"id":""}}).get("track", {"id":""}).get("id", "")
                itunes = item.get("external_metadata", {"itunes":{"track":{"id":""}}}).get("itunes", {"track":{"id":""}}).get("track", {"id":""}).get("id", "")
                youtube = item.get("external_metadata", {"youtube":{"vid":""}}).get("youtube", {"vid":""}).get("vid", "")

            if "custom_files" in metadata and len(metadata["custom_files"]) > 0:
                custom_item = metadata["custom_files"][0]
                custom_files_title = custom_item.get("title", "")
                audio_id = custom_item.get("audio_id", "")
        except Exception as e:
            self.dlog.logger.error("parse_data.error.data:{0}".format(metadata), exc_info=True)

        res = (title, artists, album, acrid, played_duration, label, isrc, upc,
               deezer, spotify, itunes, youtube, custom_files_title, audio_id)
        return res

    def apply_filter(self, results):
        fworker = FilterWorker()
        result_new = fworker.apply_filter(results)
        return result_new

    def do_recognize(self, filepath, start_time, rec_length):
        # Bind current_time outside the try so the fallback return cannot hit
        # an unbound local if recognition raises.
        current_time = time.strftime('%d %H:%M:%S', time.gmtime(start_time))
        try:
            res_data = self.re_handler.recognize_by_file(filepath, start_time, rec_length)
            return filepath, current_time, res_data
        except Exception as e:
            self.dlog.logger.error("do_recognize.error.({0}, {1}, {2})".format(filepath, start_time, rec_length), exc_info=True)
        return filepath, current_time, None

    def recognize_file(self, filepath, start_time, stop_time, step, rec_length, with_duration=0):
        self.dlog.logger.warn("scan_file.start_to_run: {0}".format(filepath))

        result = []
        for i in range(start_time, stop_time, step):
            filep, current_time, res_data = self.do_recognize(filepath, i, rec_length)
            try:
                jsoninfo = json.loads(res_data)
                code = jsoninfo['status']['code']
                msg = jsoninfo['status']['msg']
                if "status" in jsoninfo  and jsoninfo["status"]["code"] ==0 :
                    result.append({"timestamp":current_time, "rec_length":rec_length, "result":jsoninfo, "file":filep})
                    res = self.parse_data(jsoninfo)
                    self.dlog.logger.info('recognize_file.(time:{0}, title: {1})'.format(current_time, res[0]))
                if code == 2005:
                    self.dlog.logger.warn('recognize_file.(time:{0}, code:{1}, Done!)'.format(current_time, code))
                    break
                elif code == 1001:
                    self.dlog.logger.info("recognize_file.(time:{0}, code:{1}, No_Result)".format(current_time, code))
                elif code == 3001:
                    self.dlog.logger.error('recognize_file.(time:{0}, code:{1}, Missing/Invalid Access Key)'.format(current_time, code))
                    break
                elif code == 3003:
                    self.dlog.logger.error('recognize_file.(time:{0}, code:{1}, Limit exceeded)'.format(current_time, code))
                elif code == 3000:
                    self.dlog.logger.error('recognize_file.(time:{0}, {1}, {2})'.format(current_time, code, msg))
                    self.write_error(filepath, i, 'NETWORK ERROR')
            except Exception as e:
                self.dlog.logger.error('recognize_file.error', exc_info=True)
                self.write_error(filepath, i, 'JSON ERROR')
        return result


    def scan_file_main(self, option, start_time, stop_time):
        try:
            filepath = option.file_path
            step = option.step
            rec_length = option.rec_length
            with_duration = option.with_duration
            if start_time == 0 and stop_time == 0:
                file_total_seconds = int(ACRCloudRecognizer.get_duration_ms_by_file(filepath)/1000)
                results = self.recognize_file(filepath, start_time, file_total_seconds, step, rec_length, with_duration)
            else:
                results = self.recognize_file(filepath, start_time, stop_time, step, rec_length, with_duration)

            filename = 'result-' + os.path.basename(filepath.strip()) + '.csv'
            if os.path.exists(filename):
                os.remove(filename)
            if results:
                self.export_to_csv(results, filename)

            if with_duration == 1:
                new_results = []
                if results:
                    new_results = self.apply_filter(results)
                filename_with_duration = 'result-' + os.path.basename(filepath.strip()) + '_with_duration.csv'
                self.export_to_csv(new_results, filename_with_duration)
        except Exception as e:
            self.dlog.logger.error("scan_file_main.error", exc_info=True)


    def scan_folder_main(self, option, start_time, stop_time):
        try:
            path = option.folder_path
            file_list = os.listdir(path)
            for i in file_list:
                option.file_path = os.path.join(path, i)
                self.scan_file_main(option, start_time, stop_time)
        except Exception as e:
            self.dlog.logger.error("scan_folder_main.error", exc_info=True)
 def initLog(self):
     self._dlog = AcrcloudLogger("RecManager", logging.INFO)
     if not self._dlog.addFilehandler(logfile = "RecWorker.log", logdir = self._config['log']['dir']):
         self.exitRecM('rec_error#0#init_flog_error')
     if not self._dlog.addStreamHandler():
         self.exitRecM('rec_error#0#init_slog_error')
class AcrcloudSpringboard:
    def __init__(self, manager, config, dworker, rworker, sworker):
        self.manager = manager
        self.config = config
        self.dworker = dworker
        self.rworker = rworker
        self.sworker = sworker
        self.access_key = self.config['user']['access_key']
        #self.access_secret = self.config['user']['access_secret']
        self.api_url = self.config['user']['api_url']
        self.stream_ids = self.config.get("stream_ids", [])
        self.record = int(self.config['record']['record'])
        self.record_before = int(self.config['record']['record_before'])
        self.record_after = int(self.config['record']['record_after'])
        self.addkeys = [
            'access_key', 'access_secret', 'rec_host', 'stream_id',
            'stream_url', 'interval', 'monitor_length', 'monitor_timeout',
            'rec_timeout'
        ]
        self.mainQueue = multiprocessing.Queue()
        self.shareStatusDict = multiprocessing.Manager().dict()
        self.shareMonitorDict = multiprocessing.Manager().dict()
        self.shareDict = multiprocessing.Manager().dict()
        self.initLog()
        self.initManager()
        self.initStreams()

    def initLog(self):
        self.colorfmt = "$MAGENTA%(asctime)s - $RED%(name)-20s$RESET - $COLOR%(levelname)-8s$RESET - $COLOR%(message)s$RESET"
        self.dlog = AcrcloudLogger('Acrcloud@Springboard', logging.INFO)
        if not self.dlog.addStreamHandler(self.colorfmt):
            sys.exit(1)

    def initManager(self):
        try:
            self.manager_proc = multiprocessing.Process(
                target=self.manager,
                args=(self.mainQueue, self.config, self.shareMonitorDict,
                      self.shareStatusDict, self.shareDict, self.dworker,
                      self.rworker, self.sworker))
            self.manager_proc.start()
            if not self.manager_proc.is_alive():
                self.dlog.logger.error(
                    'Error@Springboard:create manager process failed, it will stop'
                )
                sys.exit(1)
            else:
                self.dlog.logger.warn('Warn@Springboard:manager init success')
        except Exception as e:
            self.dlog.logger.error(
                'Error@Springboard:init manager failed, it will stop',
                exc_info=True)
            sys.exit(1)

    def checkInfo(self, info):
        if len(info) >= 8:
            for key in self.addkeys:
                if info.get(key, 'None') == 'None':
                    return False
            return True
        return False

    def changeStat(self, id, index, msg):
        stat = self.shareStatusDict[id]
        stat[index] = msg
        self.shareStatusDict[id] = stat

    def changeMon(self, id, index, value):
        tmp = self.shareMonitorDict[id]
        tmp[index] = value
        self.shareMonitorDict[id] = tmp

    def getPage(self, url, referer=None):
        response = ''
        for i in range(2):
            request = urllib2.Request(url)
            request.add_header(
                "User-Agent",
                "Mozilla/5.0 (Windows NT 6.1; rv:14.0) Gecko/20100101 Firefox/14.0.1"
            )
            if referer:
                request.add_header("Referer", referer)
            try:
                response = urllib2.urlopen(request)
                if response:
                    result = response.read()
                    response.close()
                    return result
            except Exception as e:
                traceback.print_exc()
                if response:
                    response.close()
        return ''
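
getPage above depends on Python 2's urllib2; a rough Python 3 counterpart using urllib.request, keeping the two-attempt retry and the same headers (a sketch, not part of the original code):

import traceback
import urllib.request

def get_page_py3(url, referer=None):
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 6.1; rv:14.0) "
                             "Gecko/20100101 Firefox/14.0.1"}
    if referer:
        headers["Referer"] = referer
    for _ in range(2):  # two attempts, like the original
        try:
            request = urllib.request.Request(url, headers=headers)
            with urllib.request.urlopen(request) as response:
                return response.read()
        except Exception:
            traceback.print_exc()
    return ''
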
 def initLog(self):
     self._dlog = AcrcloudLogger("SWorker_{0}.log".format(self._stream_id), logging.INFO)
     if not self._dlog.addFilehandler(logfile = "SWorker_{0}.log".format(self._stream_id), logdir = self._config['log']['dir']):
         sys.exit(1)
     if not self._dlog.addStreamHandler():
         sys.exit(1)
class AcrcloudManager:
    def __init__(self, springboard):
        self.monitor = springboard
        self.sockNum = 0
        self.sockIndex = 1
        self.client2id = {}
        self.id2client = {}
        self.initLog()

    def initLog(self):
        self.colorfmt = "$MAGENTA%(asctime)s$RESET - $RED%(name)-20s$RESET - $COLOR%(levelname)-8s$RESET - $COLOR%(message)s$RESET"
        self.dlog = AcrcloudLogger('Client@Main', logging.INFO)
        if not self.dlog.addStreamHandler(self.colorfmt):
            sys.exit(1)

    def addClient(self, client):
        self.sockNum = self.sockNum + 1
        self.client2id[client] = self.sockIndex
        self.id2client[self.sockIndex] = client
        self.dlog.logger.info('New Client, ID: {0}'.format(self.sockIndex))
        self.sockIndex = self.sockIndex + 1

    def delClient(self, client):
        if client in self.client2id:
            self.sockNum = self.sockNum - 1
            _sockid = self.client2id[client]
            del self.client2id[client]
            del self.id2client[_sockid]
            self.dlog.logger.info('Close Client, ID: {0}'.format(_sockid))

    def getSockid(self, client):
        if client in self.client2id:
            return self.client2id[client]
        else:
            return None

    def getClient(self, sockid):
        if sockid in self.id2client:
            return self.id2client[sockid]
        else:
            return None

    def recData(self, recdata):
        datainfo = recdata[:-2].split('\r\n', 1)
        if len(datainfo) == 2:
            cmd_info, data_block = datainfo
            cmd_info = cmd_info.split()
            if len(cmd_info) != 5:
                return 'ERROR'
            if cmd_info[0] == 'set':
                ret = ''
                if cmd_info[1] == 'restart':
                    ret = self.monitor.reStart(data_block)
                elif cmd_info[1] == 'refresh':
                    ret = self.monitor.reFresh()
                elif cmd_info[1] == 'pause':
                    ret = self.monitor.pauseM(data_block)
                elif cmd_info[1] == 'stop':
                    ret = self.monitor.stop()
                else:
                    ret = "NOT_STORED"
                return ret
            else:
                return "ERROR"
        elif len(datainfo) == 1:
            cmd_info = datainfo[0].split()
            if cmd_info[0] == 'get':
                ret = ''
                if cmd_info[1].startswith('state:'):
                    id = cmd_info[1].split(':')[1]
                    sd = self.monitor.getStat(id.strip())
                    return 'VALUE {0} 0 {1}\r\n{2}'.format(
                        cmd_info[1], len(sd), sd)
                else:
                    return "END"
            else:
                return 'ERROR'
        else:
            return "ERROR"
 def initLog(self):
     self.colorfmt = "$MAGENTA%(asctime)s$RESET - $RED%(name)-20s$RESET - $COLOR%(levelname)-8s$RESET - $COLOR%(message)s$RESET"
     self.dlog = AcrcloudLogger('Client@Main', logging.INFO)
     if not self.dlog.addStreamHandler(self.colorfmt):
         sys.exit(1)
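
Every example above follows the same contract: build an AcrcloudLogger(name, level), attach a file handler and/or a colored stream handler, and exit if either attach fails (both methods return False on failure). A condensed sketch of that pattern, assuming only the signatures visible in these examples:

import logging
import sys

def make_logger(name, logdir="./", colorfmt=None):
    dlog = AcrcloudLogger(name, logging.INFO)
    if not dlog.addFilehandler(logfile=name + ".log", logdir=logdir):
        sys.exit(1)
    ok = dlog.addStreamHandler(colorfmt) if colorfmt else dlog.addStreamHandler()
    if not ok:
        sys.exit(1)
    return dlog
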
Exemple #27
0
class AcrcloudWorker:
    def __init__(self, info, mainqueue, recqueue, shareStatusDict, shareDict,
                 config):
        self._info = info
        self._downloadFun = None
        self._config = config
        self._mainqueue = mainqueue
        self._recqueue = recqueue
        self._shareStatusDict = shareStatusDict
        self._shareDict = shareDict
        self._workQueue = Queue.Queue()
        self._download_cmdQueue = Queue.Queue()
        self._downloadHandler = None
        self._collectHandler = None
        self._stream_id = str(info.get('stream_id', ''))
        self.initLog()
        self.tools_url = Tools_Url()
        self.initConfig(info)
        self.init_url_info()
        self.isFirst = True

    def initLog(self):
        self._dlog = AcrcloudLogger("SWorker_{0}.log".format(self._stream_id),
                                    logging.INFO)
        if not self._dlog.addFilehandler(logfile="SWorker_{0}.log".format(
                self._stream_id),
                                         logdir=self._config['log']['dir']):
            sys.exit(1)
        if not self._dlog.addStreamHandler():
            sys.exit(1)

    def initConfig(self, info):
        try:
            self._dlog.logger.info('initConfig start...')

            self._rec_host = str(info.get('rec_host', ''))
            self._access_key = str(info.get('access_key', ''))
            self._access_secret = str(info.get('access_secret', ''))
            self._stream_url = str(info.get('stream_url', ''))
            self._stream_spare_urls = [
                url.strip() for url in info.get('stream_spare_urls', [])
                if url.strip()
            ]
            self._stream_spare_urls += [
                url.strip() for url in info.get('stream_urls', [])
                if url.strip()
            ]
            if self._stream_url:
                self._stream_spare_urls = self._stream_spare_urls + [
                    self._stream_url
                ]
            self._stream_spare_urls = list(set(self._stream_spare_urls))

            self._monitor_interval = info.get('interval', 5)
            self._monitor_length = info.get('monitor_length', 20)
            self._monitor_timeout = info.get('monitor_timeout', 30)
            self._timeout_Threshold = 20  #self._config["server"]["timeout_Threshold"]
            self._rec_timeout = info.get('rec_timeout', 5)
            self.baseRebornTime = 20  #self._config["server"]["reborn_Time_Sec"]
            self.rebornTime = 0
            self.deadThreshold = 20  #self._config["server"]["dead_Threshold"]
            self.isFirst = True

            if self._monitor_timeout <= self._monitor_interval + self._monitor_length:
                self._monitor_timeout = self._monitor_interval + self._monitor_length + 2
            self._downloadFun = acrcloud_download
            if not self._downloadFun:
                self._dlog.logger.error('init downloadFunc error')
                self.changeStat(0, "8#error@downloadfun_init")
                sys.exit(1)
        except Exception as e:
            self._dlog.logger.error('Error@AcrcloudWorker.initConfig',
                                    exc_info=True)
            sys.exit(1)
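
initConfig reads every per-stream setting out of the info dict handed to the constructor. A hedged example of the shape it expects; all values below are illustrative only, not taken from the source:

info = {
    'stream_id': '1001',
    'rec_host': 'ap-southeast-1.api.acrcloud.com',  # illustrative host
    'access_key': 'your_access_key',
    'access_secret': 'your_access_secret',
    'stream_url': 'rtmp://example.com/live/stream',
    'stream_spare_urls': ['http://example.com/backup.m3u8'],
    'interval': 5,           # seconds between monitor windows
    'monitor_length': 20,    # seconds of audio per window
    'monitor_timeout': 30,   # raised to interval + length + 2 if smaller
    'rec_timeout': 5,
}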

    def init_url_info(self):
        try:
            self._url_map = {
                "url_index": -1,
                "url_list": [],
                "url_list_size": 0,
                "parse_url_index": -1,
                "parse_url_list": [],
                "parse_url_list_size": 0,
                "valid_url_index": set(),  #item: (url_index, parse_url_index)
                "valid_url_try": False,
                "rtsp_protocol": ["udp", "tcp"],
                "rtsp_protocol_index": 0,  #一般默认rtsp流是使用udp协议
                "rtsp_protocol_size": 2,
            }
            self._stream_url_now = self._stream_url
            if self._stream_url:
                self._url_map["url_list"].append(self._stream_url)
            if self._stream_spare_urls:
                self._url_map["url_list"].extend(self._stream_spare_urls)

            self._url_map["url_index"] = -1
            self._url_map["url_list"] = list(set(self._url_map["url_list"]))
            self._url_map["url_list_size"] = len(self._url_map["url_list"])
            self.change_stream_url()
        except Exception as e:
            self._dlog.logger.error(
                "Error@Worker_DownloadStream.init_url_info", exc_info=True)

    def change_stream_url(self):
        try:
            if (self._url_map["url_index"], self._url_map["parse_url_index"]
                ) in self._url_map[
                    "valid_url_index"] and not self._url_map["valid_url_try"]:
                self._url_map["valid_url_try"] = True
            else:
                if (self._url_map["parse_url_index"] >= 0
                    ) and self._stream_url_now.startswith(
                        "rtsp") and self._url_map["rtsp_protocol_index"] == 0:
                    self._url_map["rtsp_protocol_index"] += 1
                else:
                    if (self._url_map["parse_url_index"] == -1) or (
                        (self._url_map["parse_url_index"] + 1)
                            == self._url_map["parse_url_list_size"]):
                        self._url_map["url_index"] = (
                            self._url_map["url_index"] +
                            1) % self._url_map["url_list_size"]
                        self._url_map["parse_url_list"] = list(
                            set(
                                self.tools_url.do_analysis_url(
                                    self._url_map["url_list"][
                                        self._url_map["url_index"]])))
                        self._url_map["parse_url_list_size"] = len(
                            self._url_map["parse_url_list"])
                        self._url_map["parse_url_index"] = 0
                        self._url_map["rtsp_protocol_index"] = 0
                        self._url_map["valid_url_try"] = False
                        self._stream_url_now = self._url_map["parse_url_list"][
                            self._url_map["parse_url_index"]]
                    else:
                        self._url_map["parse_url_index"] += 1
                        self._url_map["rtsp_protocol_index"] = 0
                        self._url_map["valid_url_try"] = False
                        self._stream_url_now = self._url_map["parse_url_list"][
                            self._url_map["parse_url_index"]]
            self._dlog.logger.warning(
                'Warn@Worker_DownloadStream.change_stream_url.do_change.now_url: {0}\nurl_map: {1}'
                .format(self._stream_url_now, self._url_map))
        except Exception as e:
            self._dlog.logger.error(
                'Error@Worker_DownloadStream.change_stream_url, url_map: {0}'.
                format(self._url_map),
                exc_info=True)
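
change_stream_url walks a three-level rotation: each configured URL, each candidate that Tools_Url.do_analysis_url derives from it, and, for rtsp sources, the UDP transport before TCP. A simplified, hypothetical generator expressing the same ordering (analyse stands in for do_analysis_url; this is a sketch of the rotation, not the worker's actual state machine):

def candidate_streams(urls, analyse):
    # cycle forever, as the worker does on repeated failures
    while True:
        for url in urls:
            for parsed in sorted(set(analyse(url))):
                if parsed.startswith('rtsp'):
                    yield (parsed, 'udp')  # UDP is tried first by default
                    yield (parsed, 'tcp')
                else:
                    yield (parsed, None)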

    def changeStat(self, index, msg):
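        # read-modify-write: a multiprocessing.Manager().dict() proxy does
        # not observe in-place mutation of a nested value, so the entry is
        # copied out, modified, and reassigned as a whole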
        stat = self._shareStatusDict[self._stream_id]
        stat[index] = msg
        self._shareStatusDict[self._stream_id] = stat

    def newStart(self):
        self._collectHandler = Worker_CollectData(
            self._rec_host, self._stream_id, self._stream_url_now,
            self._access_key, self._access_secret, self._workQueue,
            self._recqueue, self._shareDict, self._dlog.logger,
            self._monitor_length, self._monitor_interval,
            self._monitor_timeout, self._timeout_Threshold)
        self._collectHandler.start()

        self._downloadHandler = Worker_DownloadStream(
            self._stream_url_now, self._workQueue, self._download_cmdQueue,
            self._downloadFun, self._dlog.logger, self._monitor_timeout,
            self._timeout_Threshold, self.isFirst)
        self.isFirst = False
        self._downloadHandler.start()

    def nowStop(self):
        self._downloadHandler.stop()
        self._collectHandler.stop()

    def deal_mainCMD(self, recv):
        isbreak = False
        if recv == 'STOP':
            self.nowStop()
            self._dlog.logger.warn("mainQueue receive 'STOP' & JUST STOP")
            isbreak = True
        elif recv == 'PAUSE':
            self.pauseflag = True
            self.nowStop()
            self._dlog.logger.warn("mainQueue receive 'PAUSE' & JUST PAUSE")
            self.changeStat(0, "4#pause")
        elif recv == 'RESTART':
            self.nowStop()
            self.newStart()
            self.pauseflag = False
        return isbreak

    def deal_workerCMD(self, recv_thread):
        isbreak = False
        if recv_thread.startswith("STATUS"):
            status = recv_thread.split("#")
            self.changeStat(0, recv_thread[len('STATUS#'):])
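            # status codes, inferred from usage in this file (not an
            # exhaustive protocol): 0=healthy, 2=dead, 3=killed, 4=paused,
            # 6=invalid stream URL, 8=error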
            if status[1] == '2':
                self._dlog.logger.warn("cmdQueue receive 'DEAD' & JUST SLEEP")
                self.nowStop()
                self.deadcount += 1
                self.rebornTime = 5 * 60  #self.baseRebornTime * self.deadcount
                self.deadflag = True
                self.deadTime = datetime.datetime.now()
                if self.deadcount >= self.deadThreshold:
                    self.killedcount += 1
                    self.killedflag = True
                    self.deadflag = False
                    self.deadcount = 0
                    self.changeStat(0, "3#killed")
                    self._dlog.logger.error(
                        "Dead count reached threshold ({0}); monitor will be killed".
                        format(self.deadThreshold))
                    self.killedTime = datetime.datetime.now()
            elif status[1] == '3':
                pass
            elif status[1] == '6':
                self._dlog.logger.error(
                    "Invalid stream_url; this monitor will wait and retry")
                self.nowStop()
                self.invalid_url_flag = True
                self.invalid_url_time = datetime.datetime.now()
                self.deadflag = False
                self.deadcount = 0
                self.killedflag = False
                self.killedcount = 0
            elif status[1] == '0':
                self.deadcount = 0
                self.killedcount = 0
        elif recv_thread.startswith("ISVIDEO"):
            self.changeStat(1, recv_thread[len('ISVIDEO#'):])

    def start(self):
        self.newStart()
        self.deadTime = None
        self.deadflag = False
        self.deadcount = 0
        self.pauseflag = False
        self.killedTime = None
        self.killedflag = False
        self.killedcount = 0
        self.killed_reborn_hours = 1
        self.invalid_url_flag = False
        self.invalid_url_time = None
        self.invalid_url_rebornTime = 30  # seconds; an earlier 2-hour (7200s) value appears to have been replaced
        while 1:
            recv = ''
            recv_thread = ''
            if self.invalid_url_flag:
                invalidpassTime = int((datetime.datetime.now() -
                                       self.invalid_url_time).total_seconds())
                if invalidpassTime % 20 == 0:
                    self._dlog.logger.warn(
                        "Invalid URL Worker Restart Time: {0}s/{1}s".format(
                            invalidpassTime, self.invalid_url_rebornTime))
                if invalidpassTime >= self.invalid_url_rebornTime:
                    self._dlog.logger.warn("Invalid URL Try Restart...")
                    self.change_stream_url()
                    self.newStart()
                    self.invalid_url_time = None
                    self.invalid_url_flag = False

            if self.deadflag:
                passTime = (datetime.datetime.now() - self.deadTime).seconds
                if passTime % 30 == 0:
                    self._dlog.logger.warn(
                        "Worker Reborn Time: {0}s/{1}s".format(
                            passTime, self.rebornTime))
                if passTime >= self.rebornTime:
                    self._dlog.logger.warn("Worker Reborn...")
                    self.change_stream_url()
                    self.newStart()
                    self.deadTime = None
                    self.deadflag = False

            if self.killedflag:
                killedpassTime = (datetime.datetime.now() -
                                  self.killedTime).seconds
                if 1 <= self.killedcount <= 5:
                    self.killed_reborn_hours = 0.5  #pow(2, self.killedcount-1)
                elif self.killedcount >= 6:
                    self.killed_reborn_hours = 0.5  #pow(2, 5)
                else:
                    self.killed_reborn_hours = 0.5  #1
                if killedpassTime % 500 == 0:
                    self._dlog.logger.warn(
                        "Killed Worker Reborn Time: {0}/{1} (hours)".format(
                            round(killedpassTime / 3600.0, 2),
                            self.killed_reborn_hours))
                if killedpassTime >= self.killed_reborn_hours * 3600:
                    self._dlog.logger.warn("Killed Worker Reborn...")
                    self.change_stream_url()
                    self.newStart()
                    self.killedTime = None
                    self.killedflag = False
            try:
                recv = self._mainqueue.get(block=False)
            except Queue.Empty:
                time.sleep(0.5)
            if self.deal_mainCMD(recv):
                break

            try:
                recv_thread = self._download_cmdQueue.get(block=False)
            except Queue.Empty:
                time.sleep(0.5)
            if self.deal_workerCMD(recv_thread):
                break
            time.sleep(0.1)
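
start() blocks in its supervision loop, polling mainqueue for 'STOP' / 'PAUSE' / 'RESTART', so each worker is expected to run in its own process. A minimal launch sketch; launch_worker and _run_worker are hypothetical helpers that only mirror the constructor signature above:

import multiprocessing

def _run_worker(info, mainqueue, recqueue, shareStatusDict, shareDict, config):
    # build the worker inside the child process: its internal thread
    # queues (Queue.Queue) are not picklable across process boundaries
    AcrcloudWorker(info, mainqueue, recqueue,
                   shareStatusDict, shareDict, config).start()

def launch_worker(info, recqueue, shareStatusDict, shareDict, config):
    mainqueue = multiprocessing.Queue()
    proc = multiprocessing.Process(target=_run_worker,
                                   args=(info, mainqueue, recqueue,
                                         shareStatusDict, shareDict, config))
    proc.start()
    return proc, mainqueue  # send 'PAUSE' / 'RESTART' / 'STOP' via mainqueue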