Example #1
    def nova_callback(self, ch, method, properties, body):
        """
        Callback used by nova_amq() to filter incoming messages by event type.

        :param ch: the channel on which the message was received
        :param method: the delivery method of the callback
        :param properties: the properties of the message
        :param body: the body of the transmitted message
        """
        payload = json.loads(body)
        try:
            tenant_name = payload['_context_project_name']
            type_of_message = payload['event_type']
            
            if type_of_message == 'compute.instance.create.end':
                instance_id = payload['payload']['instance_id']
                instance_name = payload['payload']['hostname']
                self.zabbix_handler.create_host(instance_name, instance_id, tenant_name)
                log.info("Creating a host in Zabbix Server :%s" %(instance_name+"-"+instance_id))

            elif type_of_message == 'compute.instance.delete.end':
                host = payload['payload']['instance_id']
                try:
                    host_id = self.zabbix_handler.find_host_id(host)
                    self.zabbix_handler.delete_host(host_id)
                    log.info("Deleting host from Zabbix Server %s " %host_id )
                except Exception,e:
                    log.error(str(e))    # TODO
        except Exception,e:
             print e
             log.error(str(e))    # TODO
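
For context, a minimal sketch of how a callback like this is typically wired to an AMQP queue with pika (assuming pika >= 1.0; the queue name and handler instance are illustrative, not taken from the original project):

import pika

connection = pika.BlockingConnection(pika.ConnectionParameters(host="localhost"))
channel = connection.channel()
# Assumed queue name; OpenStack notification queue names are deployment-specific.
channel.basic_consume(queue="notifications.info",
                      on_message_callback=handler.nova_callback,
                      auto_ack=True)
channel.start_consuming()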
Example #2
def get_var_type(typ):
    global glb_types
    # Not arrays. Return a new copy of Var_type.
    [rest_typ, pointer_dim] = get_pointer_dim(typ)
    temp_type = execution.Var_type()

    if rest_typ in glb_types:
        temp_glb_typ = glb_types[rest_typ]
        temp_type.size = temp_glb_typ.size
        temp_type.element_list = temp_glb_typ.element_list
        temp_type.typ = temp_glb_typ.typ
    else:
        grp = re.match(r"i(\d+)", typ)
        if grp is not None:
            temp_type.typ = "integer"
            temp_type.size = int(grp.group(1))
        else:
            log.error("Unknown typ:" + str(typ))
            # The typ may simply not have been defined yet even though it is
            # already used; that case is not supported. Code would need to be
            # added here for it, using glb_temp_types.

    if pointer_dim is not None:
        temp_type.ispointer = 1
        temp_type.pointer_dim = pointer_dim
        temp_type.size = 32
        temp_type.pointer_typ = temp_type.typ
        temp_type.typ = "pointer"

    return temp_type
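
The attributes assigned above imply a Var_type record roughly like the following hypothetical sketch; the real class lives in the execution module:

class Var_type(object):
    # Hypothetical reconstruction, inferred from the fields get_var_type() touches.
    def __init__(self):
        self.typ = ""            # "integer", "pointer", or a named type
        self.size = 0            # bit width
        self.element_list = []   # member types, for aggregate types
        self.ispointer = 0
        self.pointer_dim = None  # pointer depth, as returned by get_pointer_dim()
        self.pointer_typ = ""    # pointee type name when typ == "pointer"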
Example #3
    def run(self):
        global TOTAL_IMAGE_COUNT

        # If the file already exists, save under a temporary file name
        if os.path.exists(self.file_path):
            is_exist = True
            file_path = self.file_temp_path
        else:
            is_exist = False
            file_path = self.file_path

        if tool.save_net_file(self.file_url, file_path):
            if check_invalid_image(file_path):
                os.remove(file_path)
                log.step("%s的封面图片无效,自动删除" % self.title)
            else:
                log.step("%s的封面图片下载成功" % self.title)
                if is_exist:
                    # If the newly downloaded image is larger than the original, replace it; otherwise delete the new download
                    if os.path.getsize(self.file_temp_path) > os.path.getsize(self.file_path):
                        os.remove(self.file_path)
                        os.rename(self.file_temp_path, self.file_path)
                    else:
                        os.remove(self.file_temp_path)

                self.thread_lock.acquire()
                TOTAL_IMAGE_COUNT += 1
                self.thread_lock.release()
        else:
            log.error("%s的封面图片 %s 下载失败" % (self.title, self.file_url))
Example #4
    def connect_gracc_url(self, gracc_url):
        try:
            self.es = elasticsearch.Elasticsearch(
                [gracc_url], timeout=300, use_ssl=True, verify_certs=True,
                ca_certs='/etc/ssl/certs/ca-bundle.crt')
        except Exception as e:
            log.exception(e)
            log.error("Unable to connect to GRACC database")
            raise
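
A hypothetical follow-up query against the client built above, assuming an elasticsearch-py version whose search() accepts a body (index name invented for illustration):

# result = self.es.search(index="gracc.osg.summary",
#                         body={"query": {"match_all": {}}})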
Example #5
def main():

    try:
        main_unwrapped()
    except SystemExit:
        raise
    except (Exception, KeyboardInterrupt) as e:
        log.error(str(e))
        log.exception(e)
        raise
Example #6
def cmd_args_parse():
    # Currently: script_name secondary_ir lineno
    log.info("Command args:" + str(sys.argv))
    if len(sys.argv) != 3:  # script + input name + lineno
        log.error("Command args number:" + str(len(sys.argv)))
        sys.exit("Improper arguments")
    else:
        # Return [input_file_name, lineno]
        return [sys.argv[1], int(sys.argv[2])]
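
A hypothetical invocation matching the argument check above (the input file name is invented):

# $ python script.py secondary.ir 42
input_file_name, lineno = cmd_args_parse()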
Example #7
def get_branch_name(label):
    # The branch name is the label with the leading % stripped.
    # Global (@) branches are not expected here.
    if label[0] == "%":
        return label[1:]
    elif label[0] == "@":
        log.error("Unexpected @ in label name " + str(label))
        return label[1:]
    else:
        return label
Example #8
    def main(self):
        page_count = 1
        image_count = 0
        main_thread_count = threading.activeCount()
        # Multithreaded download mode
        # 1: start N download threads at once
        # 2: download all images on a page with threads, finish the page, then move on to the next
        thread_type = 2
        while True:
            # Fetch one page
            page_data = get_one_page_data(page_count)
            if page_data is None:
                log.error("第%s页获取失败" % page_count)
                break

            # Get the info list of all images on the page
            image_info_list = re.findall('<img src="" data-original="([^"]*)" class="lazy img" title="([^"]*)">', page_data)
            # Get the number of videos on the page
            page_data_count = page_data.count('<div class="item pull-left">')

            # Everything has been downloaded
            if page_data_count == 0:
                break
            log.step("第%s页,影片数量%s,获取到的封面图片数量%s" % (page_count, len(image_info_list), page_data_count))

            for small_image_url, title in image_info_list:
                # Thread limit reached; wait
                while thread_type == 1 and threading.activeCount() >= self.thread_count + main_thread_count:
                    time.sleep(5)

                title = robot.filter_text(str(title)).upper()
                image_url = get_large_image_url(small_image_url)
                if image_url is None:
                    log.trace("%s的封面图片大图地址获取失败" % title)
                    continue

                log.step("开始下载%s的封面图片 %s" % (title, image_url))

                file_type = image_url.split(".")[-1]
                file_path = os.path.join(self.image_download_path, "%s.%s" % (title, file_type))
                file_temp_path = os.path.join(self.image_download_path, "%s_temp.%s" % (title, file_type))

                # Start the download
                thread = Download(self.thread_lock, title, file_path, file_temp_path, image_url)
                thread.start()
                time.sleep(0.1)

            # Threads still unfinished
            while thread_type == 2 and threading.activeCount() > main_thread_count:
                time.sleep(5)

            page_count += 1

        log.step("全部下载完毕,耗时%s秒,共计图片%s张" % (self.get_run_time(), TOTAL_IMAGE_COUNT))
Example #9
    def connect(self):
        gracc_url = self.cp.get("GRACC Transfer", "Url")
        #gracc_url = 'https://gracc.opensciencegrid.org/q'

        try:
            self.es = elasticsearch.Elasticsearch(
                [gracc_url], timeout=300, use_ssl=True, verify_certs=True,
                ca_certs='/etc/ssl/certs/ca-bundle.crt')
        except Exception as e:
            log.exception(e)
            log.error("Unable to connect to GRACC database")
            raise
Example #10
def configure():
    usage = "usage: %prog -c config_file"
    parser = optparse.OptionParser()
    parser.add_option("-c", "--config", help="PR Graph config file",
        dest="config", default="/etc/osg_display/osg_display.conf")
    parser.add_option("-q", "--quiet", help="Reduce verbosity of output",
        dest="quiet", default=False, action="store_true")
    parser.add_option("-d", "--debug", help="Turn on debug output",
        dest="debug", default=False, action="store_true")
    parser.add_option("-T", "--notimeout",
        help="Disable alarm timeout; useful for initial run",
        dest="notimeout", default=False, action="store_true")
    opts, args = parser.parse_args()

    if not opts.config:
        parser.print_help()
        print
        log.error("Must pass a config file.")
        sys.exit(1)

    log.handlers = []

    if not opts.quiet:
        handler = logging.StreamHandler(sys.stdout)
        log.addHandler(handler)

    for handler in log.handlers:
        formatter = logging.Formatter("%(asctime)s - %(levelname)s - " \
            "%(message)s")
        handler.setFormatter(formatter)

    if opts.debug:
        log.setLevel(logging.DEBUG)

    if not opts.quiet:
        log.info("Reading from log file %s." % opts.config)

    cp = ConfigParser.SafeConfigParser()
    cp.readfp(open(opts.config, "r"))

    cp.notimeout = opts.notimeout

    logging.basicConfig(filename=cp.get("Settings", "logfile"))

    for handler in log.handlers:
        formatter = logging.Formatter("%(asctime)s - %(levelname)s - " \
            "%(message)s")
        handler.setFormatter(formatter)

    return cp
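
For reference, a minimal config file that configure() can parse might look like this (values are illustrative, not from the original project; the GRACC Transfer section matches the connect() example above):

# /etc/osg_display/osg_display.conf (illustrative)
#
# [Settings]
# logfile = /var/log/osg_display.log
#
# [GRACC Transfer]
# Url = https://gracc.example.org/q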
Example #11
def get_job_id(handle):
    """
    Read local job ID from ``bsub`` output.

    :param object handle: bsub handle
    :return: local job ID if found, else ``None``
    :rtype: :py:obj:`str`
    """
    
    for line in handle.stdout:
        match = re.search(r'Job <(\d+)> .+', line)
        if match:
            return match.group(1)
    error('Job ID not found in stdout', 'lsf.Submit')
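
A sketch of how get_job_id() might be fed, assuming bsub is on PATH (the job script name is hypothetical):

import subprocess

handle = subprocess.Popen(["bsub", "job_script.sh"],
                          stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                          universal_newlines=True)
job_id = get_job_id(handle)  # scans handle.stdout for "Job <1234> ..."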
Example #12
    def main(self):
        global ACCOUNTS

        # Parse the archive file
        # account_name  image_count  video_count  last_created_time
        account_list = robot.read_save_data(self.save_data_path, 0, ["", "0", "0", "0"])
        ACCOUNTS = account_list.keys()

        if not set_csrf_token():
            log.error("token和session获取查找失败")
            tool.process_exit()

        # Loop over and download each id
        main_thread_count = threading.activeCount()
        for account_name in sorted(account_list.keys()):
            # Check the number of running threads
            while threading.activeCount() >= self.thread_count + main_thread_count:
                if robot.is_process_end() == 0:
                    time.sleep(10)
                else:
                    break

            # Early exit
            if robot.is_process_end() > 0:
                break

            # Start the download
            thread = Download(account_list[account_name], self.thread_lock)
            thread.start()

            time.sleep(1)

        # Wait until every thread except the main thread has finished
        while threading.activeCount() > main_thread_count:
            time.sleep(10)

        # Save the data of unfinished accounts
        if len(ACCOUNTS) > 0:
            new_save_data_file = open(NEW_SAVE_DATA_PATH, "a")
            for account_name in ACCOUNTS:
                new_save_data_file.write("\t".join(account_list[account_name]) + "\n")
            new_save_data_file.close()

        # Remove the temporary directory
        self.finish_task()

        # Re-sort and save the archive file
        robot.rewrite_save_file(NEW_SAVE_DATA_PATH, self.save_data_path)

        log.step("全部下载完毕,耗时%s秒,共计图片%s张,视频%s个" % (self.get_run_time(), TOTAL_IMAGE_COUNT, TOTAL_VIDEO_COUNT))
Example #13
def auto_redirect_visit(url):
    page_return_code, page_response = tool.http_request(url)[:2]
    if page_return_code == 1:
        # A redirect is present
        redirect_url_find = re.findall('location.replace\(["|\']([^"|^\']*)["|\']\)', page_response)
        if len(redirect_url_find) == 1:
            return auto_redirect_visit(redirect_url_find[0])
        # Handle pages that cannot be accessed without cookies
        if page_response.find("用户名或密码错误") != -1:
            log.error("登陆状态异常,请在浏览器中重新登陆微博账号")
            tool.process_exit()
        # Return the page
        if page_response:
            return str(page_response)
    return False
Example #14
def get_job_id(handle):
    """
    Read local job ID from ``sbatch`` output.

    :param object handle: sbatch handle
    :return: local job ID if found, else ``None``
    :rtype: :py:obj:`str`
    """

    for f in (handle.stdout, handle.stderr):
        for line in f:
            match = re.search(r"Submitted batch job (\d+)", line)
            if match:
                return match.group(1)
    error("Job ID not found in stdout or stderr", "slurm.Submit")
Example #15
def backward_execution(glb_lines, start_line, glb_func, glb_types):
    return  # disabled: the body below is currently never executed
    # Main backward execution
    current_path = path(start_line)
    current_path.function = find_current_function(start_line, glb_func)
    current_path_list = []
    current_path_list.append(current_path)

    log.debug("Starting Execution-Start line:" + str(start_line))
    log.debug("Start Function:" + str(current_path.function.name))
    successful_path_list = []
    impossible_path_list = []  # referenced below; was missing an initializer
    
    while (len(current_path_list)!=0):
        temp_path_list=""
        for temp_path in current_path_list:
            temp_path_list=temp_path_list+" "+(str(temp_path.lineno))
        log.debug("Current Path Buffer:"+temp_path_list)
        next_path_list=[]
        for i in range(0,len(current_path_list)):
            current_path=current_path_list.pop()
            log.debug("Current Path"+str(current_path.lineno))
            list_next_path=execute_line(current_path,glb_lines,glb_func)
            temp_path_list=""
            for j in range(0,len(list_next_path)):
                temp_path_list=temp_path_list+" "+str(list_next_path[j].lineno)
                if(list_next_path[j].stop==0):
                    next_path_list.append(list_next_path[j])
                elif(list_next_path[j].end_reached==0):
                    impossible_path_list.append(list_next_path[j])
                    log.debug("Impossible Path"+str(list_next_path[j].flow))
                    log.debug("Path alias:")
                    for temp_alias in list_next_path[j].path_var_alias_assign:
                        log.debug(str(list_next_path[j].path_var_alias_assign[temp_alias].__dict__))
                    log.debug("Path constraints are:")
                    for constraint in list_next_path[j].path_constraint:
                        if constraint.elements == 3:
                            log.debug("Constraint:" + str(constraint.typ) + " LHS is:" + str(constraint.lhs.name) + " RHS is:" + str(constraint.rhs.name) + " RHS1 is:" + str(constraint.rhs1.name))
                        elif constraint.elements == 2:
                            log.debug("Constraint:" + str(constraint.typ) + " LHS is:" + str(constraint.lhs.name) + " RHS is:" + str(constraint.rhs.name))
                        else:
                            log.error("Unsupported no. of elements in constraint=" + str(constraint.elements))

                else:
                    successful_path_list.append(list_next_path[j])
                    log.debug("Successful Path"+str(list_next_path[j].flow))

            log.debug("Return Path List:"+str(temp_path_list))
        current_path_list=next_path_list
Example #16
def update_state(remote_ip, vm_name, action, state):
    """
    @cmview_ci
    @param_post{remote_ip,string}
    @param_post{vm_name}
    @param_post{action}
    @param_post{state}
    """
    try:
        node = Node.objects.get(address=remote_ip)
    except:
        raise CMException('node_not_found')

    try:
        # vm_name is expected to look like 'vm-<vm_id>-<user_id>'
        vm_id = int(vm_name.split('-')[1])
        user_id = int(vm_name.split('-')[2])
    except:
        log.debug(0, "Unknown vm from hook: %s" % vm_name)
        raise CMException('vm_not_found')

    if action != "stopped":
        log.debug(user_id, "Not updating vm state: action is %s" % str(action))
        return ''

    try:
        VM.objects.update()
        vm = VM.objects.get(id=vm_id)
    except:
        log.error(user_id, 'Cannot find vm in database!')
        raise CMException('vm_not_found')

    if vm.state not in [vm_states['running ctx'], vm_states['running']]:
        log.error(user_id, 'VM is not running!')
        raise CMException('vm_not_running')

    if vm.state == vm_states['restart']:
        raise CMException('vm_restart')

    thread = VMThread(vm, 'delete')
    thread.start()

    return ''
Example #17
    def send(self, send_data):
        try:
            so = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            so.connect((self.zabbix_host, self.zabbix_port))
            #wobj = so.makefile(u'wb')
            #wobj.write(send_data)
            #wobj.close()
            #robj = so.makefile(u'rb')
            #recv_data = robj.read()
            #robj.close()
            #so.close()
            #tmp_data = struct.unpack("<4sBq" + str(len(recv_data) - struct.calcsize("<4sBq")) + "s", recv_data)
            so.sendall(send_data)
            recv_data = so.recv(1024)
            so.close()
            tmp_data = struct.unpack("<4sBq" + str(len(recv_data) - struct.calcsize("<4sBq")) + "s", recv_data)
            recv_json = simplejson.loads(tmp_data[3])
            #log.info(recv_json)
        except Exception as e:
            log.error(str(e))
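
The struct.unpack format above matches the standard Zabbix sender protocol: a 4-byte 'ZBXD' magic, a 1-byte protocol version, and an 8-byte little-endian payload length. A minimal sketch of building a matching send_data packet (host and key names are illustrative):

import json
import struct

def build_zabbix_packet(host, key, value):
    # Frame a "sender data" request with the ZBXD header that send() unpacks.
    payload = json.dumps({"request": "sender data",
                          "data": [{"host": host, "key": key, "value": value}]})
    return struct.pack("<4sBq", b"ZBXD", 1, len(payload)) + payload.encode("utf-8")

packet = build_zabbix_packet("web-01", "custom.metric", "42")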
Example #18
def get_nic(netiface):
    if not netiface:
        found = False
        nics = psutil.net_if_addrs()
        for n, info in nics.items():
            for addr in info:
                if addr.family == socket.AF_INET and addr.address.startswith('10.'):
                    netiface = n
                    found = True
                    break
            if found:
                break
        else:
            # for-else: the loop found no interface with a 10.x address
            return

    try:
        psutil.net_io_counters(True)[netiface]
    except KeyError:
        log.error("Unknown network interface!")
        return

    return netiface
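
A hypothetical usage of get_nic(), pairing it with psutil's per-interface counters:

import psutil

nic = get_nic(None)  # auto-detect an interface with a 10.x address
if nic:
    counters = psutil.net_io_counters(pernic=True)[nic]
    print(nic, counters.bytes_sent, counters.bytes_recv)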
Example #19
    def get_fields(self, arg_fields=None, arg_add_host_field=True):
        if arg_fields is None:
            arg_fields = []

        fields = []
        field_set = set()
        for field in arg_fields:
            name, value = field.split('=', 1)
            if name not in field_set:
                fields.append(field)
                field_set.add(name)

        try:
            cfg_fields = self.options('fields')
        except Error:
            cfg_fields = []

        for option in cfg_fields:
            try:
                value = self.get('fields', option)
            except Error:
                continue
            if '<insert service' in value:
                log.error("Set the service type in statsd-agent.cfg")
                continue

            if value and option not in field_set:
                fields.append("{}={}".format(option, value))
                field_set.add(option)

        if (self.get_boolean('add-host-field', default=False) or arg_add_host_field) and 'host' not in field_set:
            fields.append("host={}".format(socket.gethostname()))

        fields = ','.join([f.replace(',', '_').replace(' ', '_').replace('.', '-') for f in fields])
        if fields and not fields.endswith(','):
            fields = ',' + fields
        return fields
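
A sketch of the tag string get_fields() produces, assuming cfg is an instance of this config class (values illustrative):

# cfg.get_fields(["service=web", "region=us-east"])
# -> ",service=web,region=us-east,host=myhost"
# The result is comma-prefixed, and ',', ' ' and '.' inside each
# "name=value" pair are sanitized to '_' and '-'.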
Example #20
    def run(self):
        global TOTAL_VIDEO_COUNT

        account_id = self.account_info[0]
        if len(self.account_info) >= 4 and self.account_info[3]:
            account_name = self.account_info[3]
        else:
            account_name = self.account_info[0]

        try:
            log.step(account_name + " 开始")

            # Use a temp directory if re-sorting is needed; otherwise download straight to the target directory
            if IS_SORT:
                video_path = os.path.join(VIDEO_TEMP_PATH, account_name)
            else:
                video_path = os.path.join(VIDEO_DOWNLOAD_PATH, account_name)

            page_count = 1
            video_count = 1
            first_video_id = "0"
            unique_list = []
            is_over = False
            need_make_download_dir = True
            while not is_over:
                # Get the video info for one page
                medias_data = get_one_page_video_data(account_id, page_count)
                if medias_data is None:
                    log.error(account_name + " 视频列表获取失败")
                    tool.process_exit()

                for media in medias_data:
                    if not robot.check_sub_key(("video", "id"), media):
                        log.error(account_name + " 第%s个视频信:%s解析失败" % (video_count, media))
                        continue

                    video_id = str(media["id"])

                    # Check whether the video id is no newer than the last saved record
                    if int(video_id) <= int(self.account_info[2]):
                        is_over = True
                        break

                    # Deduplication needed because newly added videos shift the pages
                    if video_id in unique_list:
                        continue
                    else:
                        unique_list.append(video_id)
                    # Use the first video's id as the new archive record
                    if first_video_id == "0":
                        first_video_id = video_id

                    video_url = str(media["video"])
                    log.step(account_name + " 开始下载第%s个视频 %s" % (video_count, video_url))

                    # First video: create the directory
                    if need_make_download_dir:
                        if not tool.make_dir(video_path, 0):
                            log.error(account_name + " 创建视频下载目录 %s 失败" % video_path)
                            tool.process_exit()
                        need_make_download_dir = False
                        
                    file_path = os.path.join(video_path, "%04d.mp4" % video_count)
                    if tool.save_net_file(video_url, file_path):
                        log.step(account_name + " 第%s个视频下载成功" % video_count)
                        video_count += 1
                    else:
                        log.error(account_name + " 第%s个视频 %s 下载失败" % (video_count, video_url))

                    # Reached the download count set in the config file; stop
                    if 0 < GET_VIDEO_COUNT < video_count:
                        is_over = True
                        break

                if not is_over:
                    if len(medias_data) >= VIDEO_COUNT_PER_PAGE:
                        page_count += 1
                    else:
                        # Fewer returned than requested; no videos left
                        is_over = True

            log.step(account_name + " 下载完毕,总共获得%s个视频" % (video_count - 1))

            # Sort
            if IS_SORT and video_count > 1:
                destination_path = os.path.join(VIDEO_DOWNLOAD_PATH, account_name)
                if robot.sort_file(video_path, destination_path, int(self.account_info[1]), 4):
                    log.step(account_name + " 视频从下载目录移动到保存目录成功")
                else:
                    log.error(account_name + " 创建视频保存目录 %s 失败" % destination_path)
                    tool.process_exit()

            # New archive record
            if first_video_id != "0":
                self.account_info[1] = str(int(self.account_info[1]) + video_count - 1)
                self.account_info[2] = first_video_id

            # Save the final info
            tool.write_file("\t".join(self.account_info), NEW_SAVE_DATA_PATH)
            self.thread_lock.acquire()
            TOTAL_VIDEO_COUNT += video_count - 1
            ACCOUNTS.remove(account_id)
            self.thread_lock.release()

            log.step(account_name + " 完成")
        except SystemExit as se:
            if se.code == 0:
                log.step(account_name + " 提前退出")
            else:
                log.error(account_name + " 异常退出")
Example #21
    def run(self):
        global TOTAL_IMAGE_COUNT
        global TOTAL_VIDEO_COUNT

        account_id = self.account_info[0]
        # todo: whether a different display name is needed
        account_name = account_id

        try:
            log.step(account_name + " 开始")

            # todo: whether images or videos need downloading
            # Use a temp directory if re-sorting is needed; otherwise download straight to the target directory
            if IS_SORT:
                image_path = os.path.join(IMAGE_TEMP_PATH, account_name)
                video_path = os.path.join(VIDEO_TEMP_PATH, account_name)
            else:
                image_path = os.path.join(IMAGE_DOWNLOAD_PATH, account_name)
                video_path = os.path.join(VIDEO_DOWNLOAD_PATH, account_name)

            # todo: image download logic
            # Images
            image_count = 1
            first_image_time = "0"
            need_make_image_dir = True
            if IS_DOWNLOAD_IMAGE:
                pass

            # todo: video download logic
            # Videos
            video_count = 1
            first_video_time = "0"
            need_make_video_dir = True
            if IS_DOWNLOAD_VIDEO:
                pass

            log.step(account_name + " 下载完毕,总共获得%s张图片和%s个视频" % (image_count - 1, video_count - 1))

            # Sort
            if IS_SORT:
                # todo: whether images need downloading
                if first_image_time != "0":
                    destination_path = os.path.join(IMAGE_DOWNLOAD_PATH, account_name)
                    if robot.sort_file(image_path, destination_path, int(self.account_info[1]), 4):
                        log.step(account_name + " 图片从下载目录移动到保存目录成功")
                    else:
                        log.error(account_name + " 创建图片保存目录 %s 失败" % destination_path)
                        tool.process_exit()
                # todo: whether videos need downloading
                if first_video_time != "0":
                    destination_path = os.path.join(VIDEO_DOWNLOAD_PATH, account_name)
                    if robot.sort_file(video_path, destination_path, int(self.account_info[3]), 4):
                        log.step(account_name + " 视频从下载目录移动到保存目录成功")
                    else:
                        log.error(account_name + " 创建视频保存目录 %s 失败" % destination_path)
                        tool.process_exit()

            # todo: whether images or videos need downloading
            # New archive record
            if first_image_time != "0":
                self.account_info[1] = str(int(self.account_info[1]) + image_count - 1)
                self.account_info[2] = first_image_time
            if first_video_time != "0":
                self.account_info[3] = str(int(self.account_info[3]) + video_count - 1)
                self.account_info[4] = first_video_time

            # Save the final info
            tool.write_file("\t".join(self.account_info), NEW_SAVE_DATA_PATH)
            self.thread_lock.acquire()
            # todo: whether images or videos need downloading
            TOTAL_IMAGE_COUNT += image_count - 1
            TOTAL_VIDEO_COUNT += video_count - 1
            ACCOUNTS.remove(account_id)
            self.thread_lock.release()

            log.step(account_name + " 完成")
        except SystemExit as se:
            if se.code == 0:
                log.step(account_name + " 提前退出")
            else:
                log.error(account_name + " 异常退出")
Example #22
    def run(self):
        global TOTAL_IMAGE_COUNT
        global TOTAL_VIDEO_COUNT

        account_name = self.account_info[0]

        try:
            log.step(account_name + " 开始")

            # Use a temp directory if re-sorting is needed; otherwise download straight to the target directory
            if IS_SORT:
                image_path = os.path.join(IMAGE_TEMP_PATH, account_name)
                video_path = os.path.join(VIDEO_TEMP_PATH, account_name)
            else:
                image_path = os.path.join(IMAGE_DOWNLOAD_PATH, account_name)
                video_path = os.path.join(VIDEO_DOWNLOAD_PATH, account_name)

            image_count = 1
            video_count = 1
            target_id = INIT_TARGET_ID
            first_post_id = "0"
            is_over = False
            need_make_image_dir = True
            need_make_video_dir = True

            while not is_over:
                # Get one page of posts
                message_page_data = get_message_page_data(account_name, target_id)
                if message_page_data is None:
                    log.error(account_name + " 媒体列表解析异常")
                    tool.process_exit()
                # Nothing left
                if len(message_page_data) == 0:
                    break

                for message_info in message_page_data:
                    if not robot.check_sub_key(("post",), message_info):
                        log.error(account_name + " 媒体信息解析异常 %s" % message_info)
                        continue
                    if not robot.check_sub_key(("body", "postId"), message_info["post"]):
                        log.error(account_name + " 媒体信息解析异常 %s" % message_info)
                        continue

                    target_id = message_info["post"]["postId"]
                    # Check whether we've already downloaded up to the previous record
                    if int(target_id) <= int(self.account_info[3]):
                        is_over = True
                        break

                    # Use the first post's postId as the new archive record
                    if first_post_id == "0":
                        first_post_id = str(target_id)

                    for media_info in message_info["post"]["body"]:
                        if not robot.check_sub_key(("bodyType",), media_info):
                            log.error(account_name + " 媒体列表bodyType解析异常")
                            continue

                        # bodyType: 1 = text, 2 = sticker, 3 = image, 7 = repost, 8 = video
                        body_type = int(media_info["bodyType"])
                        if body_type == 1:  # text
                            pass
                        elif body_type == 2:  # sticker
                            pass
                        elif body_type == 3:  # image
                            if IS_DOWNLOAD_IMAGE:
                                if not robot.check_sub_key(("image",), media_info):
                                    log.error(account_name + " 第%s张图片解析异常%s" % (image_count, media_info))
                                    continue

                                image_url = str(media_info["image"])
                                log.step(account_name + " 开始下载第%s张图片 %s" % (image_count, image_url))

                                # First image: create the directory
                                if need_make_image_dir:
                                    if not tool.make_dir(image_path, 0):
                                        log.error(account_name + " 创建图片下载目录 %s 失败" % image_path)
                                        tool.process_exit()
                                    need_make_image_dir = False

                                file_type = image_url.split(".")[-1]
                                image_file_path = os.path.join(image_path, "%04d.%s" % (image_count, file_type))
                                if tool.save_net_file(image_url, image_file_path):
                                    log.step(account_name + " 第%s张图片下载成功" % image_count)
                                    image_count += 1
                                else:
                                    log.error(account_name + " 第%s张图片 %s 下载失败" % (image_count, image_url))
                        elif body_type == 8:  # video
                            if IS_DOWNLOAD_VIDEO:
                                if not robot.check_sub_key(("movieUrlHq",), media_info):
                                    log.error(account_name + " 第%s个视频解析异常%s" % (video_count, media_info))
                                    continue

                                video_url = str(media_info["movieUrlHq"])
                                log.step(account_name + " 开始下载第%s个视频 %s" % (video_count, video_url))

                                # First video: create the directory
                                if need_make_video_dir:
                                    if not tool.make_dir(video_path, 0):
                                        log.error(account_name + " 创建视频下载目录 %s 失败" % video_path)
                                        tool.process_exit()
                                    need_make_video_dir = False

                                file_type = video_url.split(".")[-1]
                                video_file_path = os.path.join(video_path, "%04d.%s" % (video_count, file_type))
                                if tool.save_net_file(video_url, video_file_path):
                                    log.step(account_name + " 第%s个视频下载成功" % video_count)
                                    video_count += 1
                                else:
                                    log.error(account_name + " 第%s个视频 %s 下载失败" % (video_count, video_url))
                        elif body_type == 7:  # repost
                            pass
                        else:
                            log.error(account_name + " 第%s张图片、第%s个视频,未知bodytype %s, %s" % (image_count, video_count, body_type, media_info))

            # Sort
            if IS_SORT:
                if image_count > 1:
                    destination_path = os.path.join(IMAGE_DOWNLOAD_PATH, account_name)
                    if robot.sort_file(image_path, destination_path, int(self.account_info[1]), 4):
                        log.step(account_name + " 图片从下载目录移动到保存目录成功")
                    else:
                        log.error(account_name + " 创建图片保存目录 %s 失败" % destination_path)
                        tool.process_exit()
                if video_count > 1:
                    destination_path = os.path.join(VIDEO_DOWNLOAD_PATH, account_name)
                    if robot.sort_file(video_path, destination_path, int(self.account_info[2]), 4):
                        log.step(account_name + " 视频从下载目录移动到保存目录成功")
                    else:
                        log.error(account_name + " 创建视频保存目录 %s 失败" % destination_path)
                        tool.process_exit()

            # New archive record
            if first_post_id != "0":
                self.account_info[1] = str(int(self.account_info[1]) + image_count - 1)
                self.account_info[2] = str(int(self.account_info[2]) + video_count - 1)
                self.account_info[3] = first_post_id

            # Save the final info
            tool.write_file("\t".join(self.account_info), NEW_SAVE_DATA_PATH)
            self.thread_lock.acquire()
            TOTAL_IMAGE_COUNT += image_count - 1
            TOTAL_VIDEO_COUNT += video_count - 1
            ACCOUNTS.remove(account_name)
            self.thread_lock.release()

            log.step(account_name + " 完成")
        except SystemExit as se:
            if se.code == 0:
                log.step(account_name + " 提前退出")
            else:
                log.error(account_name + " 异常退出")
Example #23
    def main(self):
        # Parse the archive file to get the last album id
        album_id = 1
        if os.path.exists(self.save_data_path):
            save_file = open(self.save_data_path, "r")
            save_info = save_file.read()
            save_file.close()
            album_id = int(save_info.strip())

        total_image_count = 0
        total_video_count = 0
        error_count = 0
        is_over = False
        while not is_over:
            album_url = "http://meituzz.com/album/browse?albumID=%s" % album_id
            try:
                album_page_return_code, album_page = tool.http_request(album_url)[:2]
            except SystemExit:
                log.step("提前退出")
                break

            if album_page_return_code == -500:
                log.error("第%s页相册内部错误" % album_id)
                album_id += 1
                continue
            elif album_page_return_code != 1:
                log.error("第%s页图片获取失败" % album_id)
                break

            if album_page.find("<title>相册已被删除</title>") >= 0:
                error_count += 1
                if error_count >= ERROR_PAGE_COUNT_CHECK:
                    log.error("连续%s页相册没有图片,退出程序" % ERROR_PAGE_COUNT_CHECK)
                    album_id -= error_count - 1
                    break
                else:
                    log.error("第%s页相册已被删除" % album_id)
                    album_id += 1
                    continue
            # Reset the error counter
            error_count = 0

            # Image download
            if self.is_download_image and album_page.find('<input type="hidden" id="imageList"') >= 0:
                total_photo_count = tool.find_sub_string(album_page, '<input type="hidden" id="totalPageNum" value=', ' />')
                if not total_photo_count:
                    log.error("第%s页图片数量解析失败" % album_id)
                    break
                total_photo_count = int(total_photo_count)

                # Get the list of all image URLs on the page
                image_url_list = get_image_url_list(album_page)
                if image_url_list is None:
                    log.error("第%s页图片地址列表解析失败" % album_id)
                    break

                if len(image_url_list) == 0:
                    log.error("第%s页没有获取到图片" % album_id)
                    break

                is_fee = False
                if len(image_url_list) != total_photo_count:
                    album_reward_find = re.findall('<input type="hidden" id="rewardAmount" value="(\d*)">', album_page)
                    if len(album_reward_find) == 1:
                        album_reward = int(album_reward_find[0])
                        if album_reward > 0 and total_photo_count - len(image_url_list) <= 1:
                            is_fee = True
                    if not is_fee:
                        log.error("第%s页解析获取的图片数量不符" % album_id)
                        # break

                image_path = os.path.join(self.image_download_path, "%04d" % album_id)
                if not tool.make_dir(image_path, 0):
                    log.error("创建图片下载目录 %s 失败" % image_path)
                    break

                image_count = 1
                for image_url in image_url_list:
                    # Strip the blur effect
                    image_url = str(image_url).split("@")[0]
                    log.step("开始下载第%s页第%s张图片 %s" % (album_id, image_count, image_url))

                    image_file_path = os.path.join(image_path, "%04d.jpg" % image_count)
                    try:
                        if tool.save_net_file(image_url, image_file_path, True):
                            log.step("第%s页第%s张图片下载成功" % (album_id, image_count))
                            image_count += 1
                        else:
                            log.error("第%s页第%s张图片 %s 下载失败" % (album_id, image_count, image_url))
                    except SystemExit:
                        log.step("提前退出")
                        tool.remove_dir(image_path)
                        is_over = True
                        break

                total_image_count += image_count - 1

            # Video download (the original gated this on is_download_image, presumably a typo)
            if self.is_download_video and album_page.find('<input type="hidden" id="VideoUrl"') >= 0:
                # Get the video download URL
                video_url = get_video_url(album_page)
                log.step("开始下载第%s页视频 %s" % (album_id, video_url))

                video_title = robot.filter_text(tool.find_sub_string(album_page, "<title>", "</title>"))
                file_type = video_url.split(".")[-1]
                video_file_path = os.path.join(self.video_download_path, "%s %s.%s" % (album_id, video_title, file_type))
                try:
                    if tool.save_net_file(video_url, video_file_path, True):
                        log.step("第%s页视频下载成功" % album_id)
                        total_video_count += 1
                    else:
                        log.error("第%s页视频 %s 下载失败" % (album_id, video_url))
                except SystemExit:
                    log.step("提前退出")
                    is_over = True

            if not is_over:
                album_id += 1

        # Re-save the archive file
        save_data_dir = os.path.dirname(self.save_data_path)
        if not os.path.exists(save_data_dir):
            tool.make_dir(save_data_dir, 0)
        save_file = open(self.save_data_path, "w")
        save_file.write(str(album_id))
        save_file.close()

        log.step("全部下载完毕,耗时%s秒,共计图片%s张,视频%s个" % (self.get_run_time(), total_image_count, total_video_count))
Example #24
    def run(self):
        global TOTAL_IMAGE_COUNT

        account_name = self.account_info[0]

        try:
            log.step(account_name + " 开始")

            # Use a temp directory if re-sorting is needed; otherwise download straight to the target directory
            if IS_SORT:
                image_path = os.path.join(IMAGE_TEMP_PATH, account_name)
            else:
                image_path = os.path.join(IMAGE_DOWNLOAD_PATH, account_name)

            image_count = 1
            page_count = 1
            first_blog_time = "0"
            is_over = False
            need_make_image_dir = True
            while not is_over:
                # Get one page of the blog
                blog_data = get_blog_page_data(account_name, page_count)
                if blog_data is None:
                    log.error(account_name + " 第%s页日志无法获取" % page_count)
                    tool.process_exit()

                # Parse the blog post time
                blog_time = get_blog_time(blog_data)
                if blog_time is None:
                    log.error(account_name + " 第%s页解析日志时间失败" % page_count)
                    tool.process_exit()

                # Check whether this is the last blog from the previous run
                if blog_time <= int(self.account_info[2]):
                    break

                # Use the first blog's time as the new archive record
                if first_blog_time == "0":
                    first_blog_time = str(blog_time)

                # Get all images from the blog list
                image_url_list = get_image_url_list(blog_data)
                for image_url in image_url_list:
                    # Use the image's default resolution
                    image_url = image_url.split("?")[0]
                    # Filter out emoji
                    if image_url.find("http://emoji.ameba.jp") >= 0:
                        continue
                    log.step(account_name + " 开始下载第%s张图片 %s" % (image_count, image_url))

                    # First image: create the directory
                    if need_make_image_dir:
                        if not tool.make_dir(image_path, 0):
                            log.error(account_name + " 创建图片下载目录 %s 失败" % image_path)
                            tool.process_exit()
                        need_make_image_dir = False
                        
                    file_type = image_url.split(".")[-1]
                    file_path = os.path.join(image_path, "%04d.%s" % (image_count, file_type))
                    if tool.save_net_file(image_url, file_path):
                        log.step(account_name + " 第%s张图片下载成功" % image_count)
                        image_count += 1
                    else:
                        log.error(account_name + " 第%s张图片 %s 获取失败" % (image_count, image_url))

                # Reached the download count set in the config file; stop
                if 0 < GET_IMAGE_COUNT < image_count:
                    is_over = True

                if not is_over:
                    if 0 < GET_PAGE_COUNT < page_count:
                        is_over = True
                    else:
                        page_count += 1

            log.step(account_name + " 下载完毕,总共获得%s张图片" % (image_count - 1))

            # Sort
            if IS_SORT and image_count > 1:
                destination_path = os.path.join(IMAGE_DOWNLOAD_PATH, account_name)
                if robot.sort_file(image_path, destination_path, int(self.account_info[1]), 4):
                    log.step(account_name + " 图片从下载目录移动到保存目录成功")
                else:
                    log.error(account_name + " 创建图片子目录 %s 失败" % destination_path)
                    tool.process_exit()

            # New archive record
            if first_blog_time != "0":
                self.account_info[1] = str(int(self.account_info[1]) + image_count - 1)
                self.account_info[2] = first_blog_time

            # Save the final info
            tool.write_file("\t".join(self.account_info), NEW_SAVE_DATA_PATH)
            self.thread_lock.acquire()
            TOTAL_IMAGE_COUNT += image_count - 1
            ACCOUNTS.remove(account_name)
            self.thread_lock.release()

            log.step(account_name + " 完成")
        except SystemExit as se:
            if se.code == 0:
                log.step(account_name + " 提前退出")
            else:
                log.error(account_name + " 异常退出")
Example #25
    def run(self):
        global TOTAL_IMAGE_COUNT

        coser_id = self.account_info[0]
        if len(self.account_info) >= 3:
            cn = self.account_info[2]
        else:
            cn = self.account_info[0]

        try:
            log.step(cn + " 开始")

            image_path = os.path.join(IMAGE_DOWNLOAD_PATH, cn)

            # Image download
            this_cn_total_image_count = 0
            page_count = 1
            total_rp_count = 1
            first_rp_id = ""
            unique_list = []
            is_over = False
            need_make_download_dir = True  # whether the cn directory needs creating
            while not is_over:
                # Get one page of post info
                post_page = get_one_page_post(coser_id, page_count)
                if post_page is None:
                    log.error(cn + " 无法访问第%s页作品" % page_count)
                    tool.process_exit()

                # Parse the post info and get all feature (rp) entries
                cp_id, rp_list = get_rp_list(post_page)
                if cp_id is None:
                    log.error(cn + " 第%s页作品解析异常" % page_count)
                    tool.process_exit()

                for rp_id, title in rp_list.iteritems():
                    # Check whether we've already downloaded up to the previous images
                    if int(rp_id) <= int(self.account_info[1]):
                        is_over = True
                        break

                    # Deduplication needed because newly added features shift the pages
                    if rp_id in unique_list:
                        continue
                    else:
                        unique_list.append(rp_id)
                    # Use the first post's id as the new archive record
                    if first_rp_id == "":
                        first_rp_id = rp_id

                    log.trace("rp: " + rp_id)

                    if need_make_download_dir:
                        if not tool.make_dir(image_path, 0):
                            log.error(cn + " 创建CN目录 %s 失败" % image_path)
                            tool.process_exit()
                        need_make_download_dir = False

                    # Filter unsupported characters out of the title
                    title = robot.filter_text(title)
                    if title:
                        rp_path = os.path.join(image_path, "%s %s" % (rp_id, title))
                    else:
                        rp_path = os.path.join(image_path, rp_id)
                    if not tool.make_dir(rp_path, 0):
                        # Directory creation failed; retry without the title, exit if that also fails
                        log.error(cn + " 创建作品目录 %s 失败,尝试不使用title" % rp_path)
                        rp_path = os.path.join(image_path, rp_id)
                        if not tool.make_dir(rp_path, 0):
                            log.error(cn + " 创建作品目录 %s 失败" % rp_path)
                            tool.process_exit()

                    # Get the list of all image URLs on the feature page
                    image_url_list = get_image_url_list(cp_id, rp_id)
                    if image_url_list is None:
                        log.error(cn + " 无法访问正片:%s,cp_id:%s" % (rp_id, cp_id))
                        continue

                    if len(image_url_list) == 0 and IS_AUTO_FOLLOW:
                        log.step(cn + " 检测到可能有私密作品且账号不是ta的粉丝,自动关注")
                        if follow(coser_id):
                            # Re-fetch the list of all image URLs on the feature page
                            image_url_list = get_image_url_list(cp_id, rp_id)

                    if len(image_url_list) == 0:
                        log.error(cn + " 正片:%s没有任何图片,可能是你使用的账号没有关注ta,所以无法访问只对粉丝开放的私密作品,cp_id:%s" % (rp_id, cp_id))
                        continue

                    image_count = 1
                    for image_url in list(image_url_list):
                        # Drop the resolution suffix
                        image_url = "/".join(image_url.split("/")[0:-1])
                        log.step(cn + " %s 开始下载第%s张图片 %s" % (rp_id, image_count, image_url))

                        if image_url.rfind("/") < image_url.rfind("."):
                            file_type = image_url.split(".")[-1]
                        else:
                            file_type = "jpg"
                        file_path = os.path.join(rp_path, "%03d.%s" % (image_count, file_type))
                        if tool.save_net_file(image_url, file_path):
                            log.step(cn + " %s 第%s张图片下载成功" % (rp_id, image_count))
                            image_count += 1
                        else:
                            log.error(cn + " %s 第%s张图片 %s 下载失败" % (rp_id, image_count, image_url))

                    this_cn_total_image_count += image_count - 1

                    if 0 < GET_PAGE_COUNT < total_rp_count:
                        is_over = True
                        break
                    else:
                        total_rp_count += 1

                if not is_over:
                    if page_count >= get_max_page_count(coser_id, post_page):
                        is_over = True
                    else:
                        page_count += 1

            log.step(cn + " 下载完毕,总共获得%s张图片" % this_cn_total_image_count)

            # New archive record
            if first_rp_id != "":
                self.account_info[1] = first_rp_id

            # Save the final info
            tool.write_file("\t".join(self.account_info), NEW_SAVE_DATA_PATH)
            self.thread_lock.acquire()
            TOTAL_IMAGE_COUNT += this_cn_total_image_count
            ACCOUNTS.remove(coser_id)
            self.thread_lock.release()

            log.step(cn + " 完成")
        except SystemExit:
            log.error(cn + " 异常退出")
        except Exception as e:
            log.error(cn + " 未知异常")
            log.error(str(e) + "\n" + str(traceback.format_exc()))
Example #26
    def run(self):
        global TOTAL_IMAGE_COUNT

        account_name = self.account_info[0]

        try:
            log.step(account_name + " 开始")

            if account_name.isdigit():
                site_id = account_name
            else:
                site_id = get_site_id(account_name)
            if site_id is None:
                log.error(account_name + " 主页无法访问")
                tool.process_exit()

            if not site_id:
                log.error(account_name + " site id解析失败")
                tool.process_exit()

            image_path = os.path.join(IMAGE_DOWNLOAD_PATH, account_name)

            this_account_total_image_count = 0
            post_count = 0
            first_post_id = "0"
            post_time = "2016-11-16 14:12:00"
            is_over = False
            while not is_over:
                # Get one page of album info
                post_info_list = get_one_page_post_info_list(site_id, post_time)
                if post_info_list is None:
                    log.error(account_name + " 相册信息列表无法访问")
                    tool.process_exit()

                # Empty means everything has been fetched
                if len(post_info_list) == 0:
                    break

                for post_info in post_info_list:
                    if not robot.check_sub_key(("title", "post_id", "published_at", "images"), post_info):
                        log.error(account_name + " 相册信息解析失败:%s" % post_info)
                        continue

                    post_id = str(post_info["post_id"])

                    # Check whether the post id is no larger than the last record
                    if int(post_id) <= int(self.account_info[1]):
                        is_over = True
                        break

                    # Use the first post's id as the new archive record
                    if first_post_id == "0":
                        first_post_id = post_id

                    # Filter unsupported characters out of the title
                    title = robot.filter_text(post_info["title"])
                    if title:
                        post_path = os.path.join(image_path, "%s %s" % (post_id, title))
                    else:
                        post_path = os.path.join(image_path, post_id)
                    if not tool.make_dir(post_path, 0):
                        # Directory creation failed; retry without the title, exit if that also fails
                        log.error(account_name + " 创建相册目录 %s 失败,尝试不使用title" % post_path)
                        post_path = os.path.join(image_path, post_id)
                        if not tool.make_dir(post_path, 0):
                            log.error(account_name + " 创建相册目录 %s 失败" % post_path)
                            tool.process_exit()

                    image_count = 0
                    for image_info in post_info["images"]:
                        image_count += 1
                        if not robot.check_sub_key(("img_id",), image_info):
                            log.error(account_name + " 相册%s 第%s张图片解析失败" % (post_id, image_count))
                            continue
                        image_url = generate_large_image_url(site_id, image_info["img_id"])
                        log.step(account_name + " 相册%s 开始下载第%s张图片 %s" % (post_id, image_count, image_url))

                        file_path = os.path.join(post_path, "%s.jpg" % image_count)
                        if tool.save_net_file(image_url, file_path):
                            log.step(account_name + " 相册%s 第%s张图片下载成功" % (post_id, image_count))
                        else:
                            log.error(account_name + " 相册%s 第%s张图片 %s 下载失败" % (post_info["post_id"], image_count, image_url))
                    this_account_total_image_count += image_count

                    if not is_over:
                        # Reached the page count set in the config file; stop
                        if 0 < GET_PAGE_COUNT < post_count:
                            is_over = True
                        else:
                            # Album publish time
                            post_time = post_info["published_at"]
                            post_count += 1

            log.step(account_name + " 下载完毕,总共获得%s张图片" % this_account_total_image_count)

            # New archive record
            if first_post_id != "0":
                self.account_info[1] = first_post_id

            # Save the final info
            tool.write_file("\t".join(self.account_info), NEW_SAVE_DATA_PATH)
            self.thread_lock.acquire()
            TOTAL_IMAGE_COUNT += this_account_total_image_count
            ACCOUNTS.remove(account_name)
            self.thread_lock.release()

            log.step(account_name + " 完成")
        except SystemExit as se:
            if se.code == 0:
                log.step(account_name + " 提前退出")
            else:
                log.error(account_name + " 异常退出")
Example #27
    def eb(p):
        log.error(p)
Example #28
                result = False
            elif not operateYaml.get_yaml(case["casePath"]):
                result = False
    return result


if __name__ == '__main__':
    log.info(
        "===============================Start==================================="
    )
    devices_conf_yaml = PATH("../devices.yaml")
    if len(sys.argv) > 1:
        devices_conf_yaml = PATH(sys.argv[1])
    devices_conf = operateYaml.get_yaml(PATH(devices_conf_yaml))
    if adbCommon.attached_devices():
        appium_server = server.AppiumServer(devices_conf)
        appium_server.start_server()
        while not appium_server.is_running():
            time.sleep(2)
        runner_pool(devices_conf)
        appium_server.stop_server()
        operateFile.OperateFile(Constants.REPORT_COLLECT_PATH).remove_file()
        operateFile.OperateFile(Constants.REPORT_INIT).remove_file()
        operateFile.OperateFile(Constants.REPORT_INFO_PATH).remove_file()
        operateFile.OperateFile(Constants.CRASH_LOG_PATH).remove_file()
    else:
        log.error(u"设备不存在")
    log.info(
        "=======================================End====================================="
    )
Example #29
    def run(self):
        global TOTAL_IMAGE_COUNT

        account_id = self.account_info[0]
        if len(self.account_info) >= 4 and self.account_info[3]:
            account_name = self.account_info[3]
        else:
            account_name = self.account_info[0]

        try:
            log.step(account_name + " 开始")

            # Use a temp directory if re-sorting is needed; otherwise download straight to the target directory
            if IS_SORT:
                image_path = os.path.join(IMAGE_TEMP_PATH, account_name)
            else:
                image_path = os.path.join(IMAGE_DOWNLOAD_PATH, account_name)

            # Images
            image_count = 1
            page_count = 1
            first_blog_id = "0"
            need_make_image_dir = True
            is_over = False
            is_big_image_over = False
            while not is_over:
                # Get one page of blog info
                blog_page = get_one_page_blog(account_id, page_count)
                if blog_page is None:
                    log.error(account_name + " 第%s页日志获取失败" % page_count)
                    tool.process_exit()
                if not blog_page:
                    log.error(account_name + " 第%s页日志解析失败" % page_count)
                    tool.process_exit()

                blog_data_list = get_blog_data_list(blog_page)
                if len(blog_data_list) == 0:
                    log.error(account_name + " 第%s页日志分组失败" % page_count)
                    tool.process_exit()

                for blog_data in blog_data_list:
                    # Get the blog id
                    blog_id = get_blog_id(account_id, blog_data)
                    if blog_id is None:
                        log.error(account_name + " 日志解析日志id失败,日志内容:%s" % blog_data)
                        tool.process_exit()

                    # Check whether we've already downloaded up to the previous run's blogs
                    if blog_id <= int(self.account_info[2]):
                        is_over = True
                        break

                    # Record the first blog's id as the new archive marker
                    if first_blog_id == "0":
                        first_blog_id = str(blog_id)

                    # Collect every image URL in this blog entry
                    image_url_list = get_image_url_list(blog_data)
                    if len(image_url_list) == 0:
                        continue

                    # Collect the large-image URLs present on the blog page, with their corresponding small-image URLs
                    big_2_small_list = get_big_image_url_list(blog_data)

                    # Download the images
                    for image_url in image_url_list:
                        # Check whether a larger version of the image is available
                        if not is_big_image_over:
                            image_url, is_big_image_over = check_big_image(image_url, big_2_small_list)
                        log.step(account_name + " 开始下载第%s张图片 %s" % (image_count, image_url))

                        # First image: create the directory
                        if need_make_image_dir:
                            if not tool.make_dir(image_path, 0):
                                log.error(account_name + " 创建图片下载目录 %s 失败" % image_path)
                                tool.process_exit()
                            need_make_image_dir = False

                        file_type = image_url.split(".")[-1]
                        if file_type.find("?") != -1:
                            file_type = "jpeg"
                        file_path = os.path.join(image_path, "%04d.%s" % (image_count, file_type))
                        if tool.save_net_file(image_url, file_path):
                            log.step(account_name + " 第%s张图片下载成功" % image_count)
                            image_count += 1
                        else:
                            log.error(account_name + " 第%s张图片 %s 下载失败" % (image_count, image_url))

                        # Stop once the download count from the config file is reached
                        if 0 < GET_IMAGE_COUNT < image_count:
                            is_over = True
                            break

                if not is_over:
                    # Stop once the page count from the config file is reached
                    if 0 < GET_PAGE_COUNT <= page_count:
                        is_over = True
                    # Check whether the current page count has reached the total page count
                    elif page_count >= get_max_page_count(blog_page):
                        is_over = True
                    else:
                        page_count += 1

            log.step(account_name + " 下载完毕,总共获得%s张图片" % (image_count - 1))

            # Sort
            if IS_SORT:
                if first_blog_id != "0":
                    destination_path = os.path.join(IMAGE_DOWNLOAD_PATH, account_name)
                    if robot.sort_file(image_path, destination_path, int(self.account_info[1]), 4):
                        log.step(account_name + " 图片从下载目录移动到保存目录成功")
                    else:
                        log.error(account_name + " 创建图片保存目录 %s 失败" % destination_path)
                        tool.process_exit()

            # New archive record
            if first_blog_id != "0":
                self.account_info[1] = str(int(self.account_info[1]) + image_count - 1)
                self.account_info[2] = first_blog_id

            # Save the final record
            self.thread_lock.acquire()
            tool.write_file("\t".join(self.account_info), NEW_SAVE_DATA_PATH)
            TOTAL_IMAGE_COUNT += image_count - 1
            ACCOUNTS.remove(account_id)
            self.thread_lock.release()

            log.step(account_name + " 完成")
        except SystemExit, se:
            if se.code == 0:
                log.step(account_name + " 提前退出")
            else:
                log.error(account_name + " 异常退出")
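
# A minimal sketch (the helper name is hypothetical, not part of the original):
# the extension logic above falls back to "jpeg" for every URL that carries a
# query string; stripping the query first keeps the real extension when there
# is one.
def guess_file_type(image_url, default="jpeg"):
    import urlparse  # Python 2 module; urllib.parse on Python 3
    path = urlparse.urlparse(image_url).path
    if "." in path:
        return path.rsplit(".", 1)[-1] or default
    return default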
Beispiel #30
0
    def run(self):
        global TOTAL_VIDEO_COUNT

        account_id = self.account_info[0]
        if len(self.account_info) >= 3 and self.account_info[2]:
            account_name = self.account_info[2]
        else:
            account_name = self.account_info[0]

        try:
            log.step(account_name + " 开始")

            # Use a temporary folder if re-sorting is needed; otherwise download straight to the target directory
            if IS_SORT:
                video_path = os.path.join(VIDEO_TEMP_PATH, account_name)
            else:
                video_path = os.path.join(VIDEO_DOWNLOAD_PATH, account_name)

            # Fetch the video info list
            video_info_list = get_video_info_list(account_id)
            if video_info_list is None:
                log.error(account_name + " 视频列表获取失败")
                tool.process_exit()

            video_count = 1
            first_video_id = "0"
            need_make_video_dir = True
            for video_info in video_info_list:
                if not robot.check_sub_key(("item_data",), video_info) or \
                        not robot.check_sub_key(("watch_id", "title"), video_info["item_data"]):
                    log.error(account_name + " 视频信息%s解析失败" % video_info)
                    tool.process_exit()

                # an example watch_id: sm30043563
                video_id = str(video_info["item_data"]["watch_id"])

                # Record the first video's id as the new archive marker
                if first_video_id == "0":
                    first_video_id = video_id

                # Strip characters not supported in file names from the title
                video_title = robot.filter_text(video_info["item_data"]["title"])

                # First video: create the directory
                if need_make_video_dir:
                    if not tool.make_dir(video_path, 0):
                        log.error(account_name + " 创建视频下载目录 %s 失败" % video_path)
                        tool.process_exit()
                    need_make_video_dir = False

                # Get the video download URL
                video_url = get_video_url(video_id)
                log.step(account_name + " 开始下载第%s个视频 %s %s" % (video_count, video_id, video_url))
                print "%s %s" % (video_id, video_title)
                file_path = os.path.join(video_path, "%s %s.mp4" % (video_id, video_title))
                if tool.save_net_file(video_url, file_path):
                    log.step(account_name + " 第%s个视频下载成功" % video_count)
                    video_count += 1
                else:
                    log.error(account_name + " 第%s个视频 %s %s 下载失败" % (video_count, video_id, video_url))

            log.step(account_name + " 下载完毕,总共获得%s个视频" % (video_count - 1))

            # Sort
            if IS_SORT:
                if first_video_id != "0":
                    destination_path = os.path.join(VIDEO_DOWNLOAD_PATH, account_name)
                    if robot.sort_file(video_path, destination_path, int(self.account_info[3]), 4):
                        log.step(account_name + " 视频从下载目录移动到保存目录成功")
                    else:
                        log.error(account_name + " 创建视频保存目录 %s 失败" % destination_path)
                        tool.process_exit()

            # New archive record
            if first_video_id != "0":
                self.account_info[1] = first_video_id

            # Save the final record
            tool.write_file("\t".join(self.account_info), NEW_SAVE_DATA_PATH)
            self.thread_lock.acquire()
            TOTAL_VIDEO_COUNT += video_count - 1
            ACCOUNTS.remove(account_id)
            self.thread_lock.release()

            log.step(account_name + " 完成")
        except SystemExit, se:
            if se.code == 0:
                log.step(account_name + " 提前退出")
            else:
                log.error(account_name + " 异常退出")
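
# A minimal sketch (an assumption: thread_lock is a threading.Lock): the
# acquire()/release() pairs in these run() methods leave the lock held forever
# if an exception fires in between; a with block releases it on any exit path.
#
#     with self.thread_lock:
#         TOTAL_VIDEO_COUNT += video_count - 1
#         ACCOUNTS.remove(account_id)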
Beispiel #31
0
def calculate_single_beamlet(beamlets, opt):
    res = {"beamlets": [], "fluence_doses": None}
    try:
        condInterestingVoxels = read_matrix(opt["interesting_voxels"])
        dose_tolerance_min = float(opt["dose_tolerance_min"])
        beam_no = opt["beam_no"]
        hostname = socket.gethostname()

        fluence_doses = None

        import tempfile

        with tempfile.TemporaryDirectory() as node_processing_folder:
            # print("Using node processing folder: %s" % node_processing_folder)

            first_idx = None
            last_idx = None
            for beamlet in beamlets:
                # print(f"Processing beamlet no: {beamlet}")
                idx = beamlet["idx"]

                # --------------------- COMPUTE USING VMC ----------------------------------------------------
                vmc_beamlet_spec_filename = "%s/beamlet_%s.vmc" % (
                    node_processing_folder, idx)
                vmc_beamlet_spec_name = "beamlet_%s" % idx
                write_beamlet(beamlet, vmc_beamlet_spec_filename, opt)
                write_beamlet(beamlet, "/tmp/akuku.vmc", opt)

                print(
                    f"Calling in parallel (beamlet: {idx}): {opt['vmc_home']}/vmc_wrapper {opt['vmc_home']} {node_processing_folder} {opt['xvmc_dir']} {opt['vmc_home']}/bin/vmc_Linux.exe {vmc_beamlet_spec_name}"
                )
                p = subprocess.Popen([
                    "%s/vmc_wrapper" % opt["vmc_home"], opt["vmc_home"],
                    node_processing_folder, opt["xvmc_dir"],
                    "%s/bin/vmc_Linux.exe" % opt["vmc_home"],
                    vmc_beamlet_spec_name
                ],
                                     stdin=subprocess.PIPE,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE)
                p.wait()
                doses_filename = "%s/%s_phantom.dos" % (node_processing_folder,
                                                        vmc_beamlet_spec_name)

                beamlet_doses = read_doses(doses_filename)
                # --------------------------------------------------------------------------------------------

                if 'fluence' in beamlet and beamlet['fluence'] is not None:
                    print(f"Fluence: {beamlet['fluence']}")
                    if fluence_doses is None:
                        fluence_doses = beamlet_doses.copy() * float(
                            beamlet['fluence'])
                    else:
                        fluence_doses += beamlet_doses * float(
                            beamlet['fluence'])
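                # fluence_doses accumulates the fluence-weighted sum of the
                # per-beamlet dose grids: total = sum_i fluence_i * doses_i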

                if opt["delete_doses_file_after"]:
                    os.remove(doses_filename)

                if opt["delete_vmc_file_after"]:
                    os.remove(vmc_beamlet_spec_filename)

                if beamlet_doses is not None:
                    last_idx = idx

                    if condInterestingVoxels is not None:
                        ####################################################################################################
                        # Keep only the doses of voxels whose value exceeds dose_tolerance_min times the beamlet's
                        # maximum dose and which belong to some ROI. Store the result as an n-row, two-column
                        # matrix: first column the voxel index, second the dose.
                        ####################################################################################################
                        maxDoseThisBeamlet = numpy.max(beamlet_doses)
                        print(
                            f"Wycinam tylko voksele, w ktorych dawka jest wieksza od: {maxDoseThisBeamlet * dose_tolerance_min} ({dose_tolerance_min*100}%)"
                        )
                        cond = (beamlet_doses >
                                (maxDoseThisBeamlet * dose_tolerance_min)) & (
                                    condInterestingVoxels)

                        vdoses = beamlet_doses[cond]
                        vindexes = numpy.where(cond)[0]  # indices of the matching voxels
                        mdoses = numpy.zeros((len(vdoses), 2),
                                             dtype=numpy.float32)
                        mdoses[:, 0] = vindexes
                        mdoses[:, 1] = vdoses

                        beamlet["doses_map"] = mdoses

                else:
                    log.error("ERROR! beamlet_doses == None!")

                res['beamlets'].append(beamlet)

        res["fluence_doses"] = fluence_doses

        return res
    except Exception:
        traceback.print_exc()
        return None
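
# A minimal sketch (the helper name is hypothetical): Popen.wait() with
# stdout/stderr=PIPE, as used above, can deadlock once the child fills an OS
# pipe buffer, because nothing drains the pipes; communicate() waits for exit
# while reading both streams.
def run_vmc(cmd_args):
    p = subprocess.Popen(cmd_args,
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    stdout, stderr = p.communicate()  # drains both pipes while waiting
    return p.returncode, stdout, stderr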
Beispiel #32
0
    def run(self):
        global TOTAL_VIDEO_COUNT

        account_id = self.account_info[0]
        if len(self.account_info) >= 4 and self.account_info[3]:
            account_name = self.account_info[3]
        else:
            account_name = self.account_info[0]

        try:
            log.step(account_name + " 开始")

            # Use a temporary folder if re-sorting is needed; otherwise download straight to the target directory
            if IS_SORT:
                video_path = os.path.join(VIDEO_TEMP_PATH, account_name)
            else:
                video_path = os.path.join(VIDEO_DOWNLOAD_PATH, account_name)

            suid = get_suid(account_id)
            if suid is None:
                log.error(account_name + " suid获取失败")
                tool.process_exit()

            page_count = 1
            video_count = 1
            first_video_scid = ""
            unique_list = []
            is_over = False
            need_make_download_dir = True
            while suid != "" and (not is_over):
                # Fetch one page of video info
                media_page = get_one_page_video_data(suid, page_count)
                if media_page is None:
                    log.error(account_name + " 视频列表获取失败")
                    tool.process_exit()

                # Extract the video scid list
                scid_list = get_scid_list(media_page["msg"])
                if len(scid_list) == 0:
                    log.error(account_name + " 在视频列表:%s 中没有找到视频scid" % str(media_page["msg"]))
                    tool.process_exit()

                for scid in scid_list:
                    scid = str(scid)

                    # Check whether we have reached the video downloaded last time
                    if scid == self.account_info[2]:
                        is_over = True
                        break

                    # Dedupe videos re-listed because new uploads shifted the pages
                    if scid in unique_list:
                        continue
                    else:
                        unique_list.append(scid)
                    # Record the first video's id as the new archive marker
                    if first_video_scid == "":
                        first_video_scid = scid

                    # Get the video download URL
                    video_url = get_video_url_by_video_id(scid)
                    if video_url is None:
                        log.error(account_name + " 第%s个视频 %s 获取下载地址失败" % (video_count, scid))
                        continue

                    log.step(account_name + " 开始下载第%s个视频 %s" % (video_count, video_url))

                    # First video: create the directory
                    if need_make_download_dir:
                        if not tool.make_dir(video_path, 0):
                            log.error(account_name + " 创建视频下载目录 %s 失败" % video_path)
                            tool.process_exit()
                        need_make_download_dir = False

                    file_path = os.path.join(video_path, "%04d.mp4" % video_count)
                    if tool.save_net_file(video_url, file_path):
                        log.step(account_name + " 第%s个视频下载成功" % video_count)
                        video_count += 1
                    else:
                        log.error(account_name + " 第%s个视频 %s 下载失败" % (video_count, video_url))

                    # Stop once the download count from the config file is reached
                    if 0 < GET_VIDEO_COUNT < video_count:
                        is_over = True
                        break

                if not is_over:
                    if media_page["isall"]:
                        is_over = True
                    else:
                        page_count += 1

            log.step(account_name + " 下载完毕,总共获得%s个视频" % (video_count - 1))

            # Sort
            if IS_SORT and video_count > 1:
                destination_path = os.path.join(VIDEO_DOWNLOAD_PATH, account_name)
                if robot.sort_file(video_path, destination_path, int(self.account_info[1]), 4):
                    log.step(account_name + " 视频从下载目录移动到保存目录成功")
                else:
                    log.error(account_name + " 创建视频保存目录 %s 失败" % destination_path)
                    tool.process_exit()

            # New archive record
            if first_video_scid != "":
                self.account_info[1] = str(int(self.account_info[1]) + video_count - 1)
                self.account_info[2] = first_video_scid

            # Save the final record
            tool.write_file("\t".join(self.account_info), NEW_SAVE_DATA_PATH)
            self.thread_lock.acquire()
            TOTAL_VIDEO_COUNT += video_count - 1
            ACCOUNTS.remove(account_id)
            self.thread_lock.release()

            log.step(account_name + " 完成")
        except SystemExit, se:
            if se.code == 0:
                log.step(account_name + " 提前退出")
            else:
                log.error(account_name + " 异常退出")
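
# An illustrative helper (not from the original): the scid de-duplication above
# does a linear scan of unique_list on every check; a set gives O(1) membership
# tests with the same behavior for string ids.
def dedupe_keep_order(ids):
    seen = set()
    ordered = []
    for item_id in ids:
        if item_id in seen:
            continue
        seen.add(item_id)
        ordered.append(item_id)
    return ordered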
Beispiel #33
0
    xml_report_path = conf.xml_report_path
    html_report_path = conf.html_report_path

    # Initialize the allure environment config file environment.xml
    initialize_env.Init_Env().init()

    # Define the test suite
    args = ['-s', '-q', '--alluredir', xml_report_path]
    # args = ['-s', '-q', '--alluredir', "H:\\api_auto_test\\report\xml"]
    pytest.main(args)
    cmd = 'allure generate %s -o %s  --clean' % (
        xml_report_path, html_report_path)
    log.info("执行allure,生成测试报告")
    log.debug(cmd)
    try:
        shell.invoke(cmd)
    except Exception:
        log.error('执行用例失败,请检查环境配置')
        raise
    if conf.send == "yes":
        try:
            mail = send_email.SendMail()
            mail.sendMail()
        except Exception as e:
            log.error('发送邮件失败,请检查邮件配置')
            raise
    elif conf.send == "no":
        log.info("配置为发送邮件")
    else:
        raise RuntimeError('配置文件错误:send只能为"yes" or "no"')
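
# A minimal sketch (assumes the allure CLI is on PATH, just as the example's
# cmd string already does): passing subprocess an argument list sidesteps
# shell quoting issues when the report paths contain spaces.
import subprocess

def generate_allure_report(xml_report_path, html_report_path):
    subprocess.check_call([
        "allure", "generate", xml_report_path,
        "-o", html_report_path, "--clean",
    ])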
Beispiel #34
0
 def eb(p):
     log.error(p)
Beispiel #35
0
    def run(self,
            query,
            target,
            src=None,
            params=None,
            parser=None,
            output_path=None):
        """Runs minimap using subprocess.

		Args:
			query: 				iterable of read-like objects or path to fasta
			target:				iterable of read-like objects or path to fasta
			src (str)			path to minimap executable. self.src if None
			params (str):		string of minimap parameters. self.params if None
			parser (func(x)):	parser func for minimap stdout result. MinimapWrapper.paf_parser if None
			output_path (str):	cache path to save mapping result to
		
		Note:
			read-like requires 'id' and 'seq' attributes

		Returns:
			output: 			result of parser
		"""

        from subprocess import Popen, PIPE
        from os.path import exists

        ## Check type(query), make temp file and write query seqs as needed
        if isinstance(query, basestring):
            if not exists(query):
                log.error(
                    'Provided query path is invalid, please provide a path as a string or Bio.SeqIO-like objects'
                )
            query_path = query
            query_file = None
        else:
            try:
                try:
                    query_file = self.create_temp_file(
                        write_data=fasta_from_seq(*zip(*[(x.id, x.seq)
                                                         for x in query])))
                except TypeError:  # is not iterable
                    query_file = self.create_temp_file(
                        write_data=fasta_from_seq(query.id, query.seq))
            except AttributeError as e:
                log.error(
                    'Provided query input is invalid, please provide a path as a string or Bio.SeqIO-like objects'
                )
                raise e
            query_path = query_file.name

        ## Check type(target), make temp file and write target seqs as needed
        if isinstance(target, basestring):
            if not exists(target):
                log.error(
                    'Provided target path is invalid, please provide a path as a string or Bio.SeqIO-like objects'
                )
            target_path = target
            target_file = None
        else:
            try:
                try:
                    target_file = self.create_temp_file(
                        write_data=fasta_from_seq(*zip(*[(x.id, x.seq)
                                                         for x in target])))
                except TypeError:  # is not iterable
                    target_file = self.create_temp_file(
                        write_data=fasta_from_seq(target.id, target.seq))
            except AttributeError as e:
                log.error(
                    'Provided target input is invalid, please provide a path as a string or Bio.SeqIO-like objects'
                )
                raise e
            target_path = target_file.name

        if not src:
            src = self.src
        if not params:
            params = self.params
        if not output_path:
            output_path = self.output_path
        if not parser:
            parser = MinimapWrapper.paf_parser

        command = ' '.join([src, params, target_path, query_path])
        log.debug('Running minimap:\n{}'.format(command))
        process = Popen(command.split(), stdout=PIPE, stderr=PIPE)
        stdout, stderr = process.communicate()

        ## save / cache output if needed
        if output_path:
            try:
                with open(output_path, 'wb') as f:
                    f.write(stdout)
            except OSError as e:
                log.error(
                    'Provided minimap output path is not valid, output will be discarded'
                )

        if not stdout.strip():
            log.error('Minimap returned no mapping')
            log.debug(stderr)
            log.debug(stdout)
            with open(query_path, 'r') as f:
                log.debug(f.readlines())
            with open(target_path, 'r') as f:
                log.debug(f.readlines())
            raise ValueError('Minimap returned no mapping')

        output = parser(stdout.strip())

        if query_file:
            query_file.close()
        if target_file:
            target_file.close()

        return output
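
    # Hypothetical usage sketch (constructor arguments are assumptions; only
    # run() appears in this snippet): map reads against a reference and get
    # the parsed PAF records back.
    #
    #     wrapper = MinimapWrapper()
    #     paf = wrapper.run('reads.fasta', 'reference.fasta',
    #                       params='-x map-ont', output_path='mapping.paf')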