Example 1
        logger.info("Trying to roll back to the original files...")
        StoWebHdfs.remove(File_dir_Path)
        StoWebHdfs.rename(BakcupPathHdfs_NEW,File_dir_Path)
        if StoWebHdfs.status == 200:
            MysqlObj.writeStatus("success",IsWaterMark,IsOverWrite,info="Cannot watermark, but files have been rolled back")
        else:
            MysqlObj.writeStatus("fail",IsWaterMark,IsOverWrite,info="Cannot watermark, and FAILED to roll back")
            sys.exit()      
    else:
        sys.exit()    
else:
    logger.info("Successfully process the file ......")
    MysqlObj.writeStatus("success",IsWaterMark,IsOverWrite,info="Successfully process the file!")
    if IsOverWrite:
        StoWebHdfs.remove(BakcupPathHdfs_NEW)
        RedisObj = baseclass.interWithRedis(logger,vid)
        RedisObj.WriteUrlToCacheList("cacheurl_list","/%s/%s"%(uid,vid))
        if StoWebHdfs.status == 200:
            logger.info("Successfully delete the backup directory")
        else:
            logger.info("WARNING: Failed to detele the backup directory!")


# Delete the original video file on the web side
RunCmdObj.run(["rm -f %s" % VideoFilePath], QuitFlag=False)

# Clean up temporary files
RunCmdObj.run(["rm -rf /tmp/%s/" % (info_dict['name'])])

# Delete intermediate data on the mapreduce cluster
MapWebHdfs.remove("/%s" % vid)
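
StoWebHdfs and MapWebHdfs are instances of baseclass.WebHadoopOld, whose implementation is not shown. Assuming it wraps the standard WebHDFS REST API, the remove/rename/status pattern used throughout this example could look roughly like the sketch below (host, port, and user are placeholders; the real class may differ):

import requests

class WebHdfsClient(object):
    def __init__(self, host, port, user):
        self.base = "http://%s:%s/webhdfs/v1" % (host, port)
        self.user = user
        self.status = None  # HTTP status of the last call, checked by callers

    def remove(self, path):
        # WebHDFS op=DELETE with recursive=true removes a whole directory tree
        r = requests.delete(self.base + path,
                            params={"op": "DELETE", "recursive": "true",
                                    "user.name": self.user})
        self.status = r.status_code

    def rename(self, src, dst):
        # WebHDFS op=RENAME moves src to dst on the same namenode
        r = requests.put(self.base + src,
                         params={"op": "RENAME", "destination": dst,
                                 "user.name": self.user})
        self.status = r.status_code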
Example 2
    with open('hadoop.info', 'rb') as f:  # pickle data should be read in binary mode
        hadoopinfo_dict = pickle.load(f)

    # Initialize the mysql object
    MysqlObj = baseclass.interWithMysql(logger, info_dict['name'])

except Exception as e:
    logger.error("reducer init exception: %s" % e)
    MysqlObj.writeStatus("fail", info_dict['iswatermark'], info_dict['isoverwrite'],
                         info="reducer init exception: %s" % e)
    sys.exit()
 
# Reinitialize the logger and mysql objects
logger = baseclass.getlog(info_dict['name'], loglevel=info_dict['loglevel'])
MysqlObj = baseclass.interWithMysql(logger, info_dict['name'])
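
getlog is another baseclass helper not shown on this page. A hedged sketch of such a logger factory using only the stdlib logging module (the handler choice and format string are assumptions):

import logging

def getlog(name, loglevel="INFO"):
    logger = logging.getLogger(name)
    logger.setLevel(getattr(logging, str(loglevel).upper(), logging.INFO))
    if not logger.handlers:  # avoid stacking handlers when re-initialized
        handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter(
            "%(asctime)s %(name)s %(levelname)s %(message)s"))
        logger.addHandler(handler)
    return logger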
 
# Initialize the redis object
RedisObj = baseclass.interWithRedis(logger, info_dict['name'])
 
# --/
#     Execute commands stage
# --/

try:
    RunCmdObj = baseclass.runCmd(logger, info_dict['name'],
                                 info_dict['iswatermark'], info_dict['isoverwrite'])

    VideoObj = reduceCmdOutput(info_dict, hadoopinfo_dict, logger)

    RunCmdObj.run(VideoObj.initReducerEnv(), QuitFlag=False)

    # Initialize the map hdfs object
    MapHdfsHost = VideoObj.HadoopNNAddr.split(":")[0]
    MapWebHdfs = baseclass.WebHadoopOld(MapHdfsHost, "50071", "cloudiyadatauser", logger)
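
The hadoop.info file loaded at the top of this example is presumably produced by an earlier stage with pickle.dump. A round-trip sketch under that assumption (the dict contents here are purely illustrative, not taken from the source):

import pickle

settings = {"namenode_addr": "namenode-host:8020"}  # hypothetical contents
with open('hadoop.info', 'wb') as f:
    pickle.dump(settings, f)

with open('hadoop.info', 'rb') as f:
    hadoopinfo_dict = pickle.load(f)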