def compare_rgb(path, rgb_folder):
    rgb_pic = calculate_pic_rgb(path)
    # The hard part is deciding which of these values to compare, and which one
    # should be authoritative. For now: if every RGB channel exceeds the folder
    # mean by more than its threshold, the page is considered not loaded yet.
    # Choosing the threshold itself is equally tricky.
    r_threshold = 20
    g_threshold = 20
    b_threshold = 20
    conf_default = Config("default.ini")
    app_key = conf_default.getconf("default").app
    # Momo has a lot of white area, so tighten the upper bound for it.
    if app_key == "momo":
        r_threshold = 10
        g_threshold = 10
        b_threshold = 10
    # Besides the upper bound, also add a lower bound to guard against the
    # screen going black during launch.
    if rgb_pic[0] > rgb_folder[0] + r_threshold:
        if rgb_pic[1] > rgb_folder[1] + g_threshold:
            if rgb_pic[2] > rgb_folder[2] + b_threshold:
                return False
    if rgb_pic[0] < rgb_folder[0] - 50:
        if rgb_pic[1] < rgb_folder[1] - 50:
            if rgb_pic[2] < rgb_folder[2] - 50:
                return False
    return True
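# A minimal usage sketch (assumption, not from the original code): rgb_folder is
# taken from calculate_repos_rgb() further below, and the list of frame paths is
# hypothetical. compare_rgb() returns True once a captured frame is close enough
# to the repository mean, i.e. the home page is considered loaded.
def find_loaded_frame(frame_paths):
    rgb_folder = calculate_repos_rgb()           # mean RGB of the sample repository
    for index, frame_path in enumerate(frame_paths):
        if compare_rgb(frame_path, rgb_folder):  # frame matches the repository mean
            return index
    return -1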
def get_ent_pos(path, out_path, machineName):
    origin_height = clip_specific_pic(path, out_path + machineName + "_clip.jpg")
    img = cv2.imread(out_path + machineName + "_clip.jpg")
    height, width, channels = img.shape
    print "height = {}, width = {}".format(height, width)
    start_time = datetime.datetime.now()
    y_array = [0]
    array_flag = False
    for i in range(0, height, 2):  # iterate over rows
        count = 0
        for j in range(0, width, 2):
            if img[i, j][0] >= 252 and img[i, j][1] >= 252 and img[i, j][2] >= 252:
                count += 1
        if count >= width / 4:
            # print count
            if not array_flag:
                y_array.append(i)
                array_flag = True
        else:
            if array_flag:
                y_array.append(i - 1)
                array_flag = False
    print "pre-deal time = {}".format(datetime.datetime.now() - start_time)
    print y_array
    ent_part_pair = []
    for t in range(1, len(y_array)):
        if y_array[t] - y_array[t - 1] > 120:
            # useful to print while debugging
            ent_part_pair.append((y_array[t - 1], y_array[t]))
    print ent_part_pair
    print "all time = {}".format(datetime.datetime.now() - start_time)
    x = width * 3 / 4
    leng_arr = len(ent_part_pair)
    # Store the feature of the live-room cell. Cropping may not even be needed:
    # reading the 10 px just above the cell (coordinates would be enough) and
    # checking their mean RGB would do. Where to keep the coordinates? (settings)
    # The image still has to be cropped, though, because it is needed later to
    # verify that entering the live room succeeded.
    # Which cell to crop depends on the app, so read it from the config file.
    conf_default = Config("default.ini")
    app_key = conf_default.getconf("default").app
    count = 2
    if app_key == "bigo":
        count = 1
    clip_specific_area(out_path + machineName + "_clip.jpg",
                       out_path + machineName + "_feature.jpg",
                       width / 4, ent_part_pair[leng_arr - count][0],
                       width, ent_part_pair[leng_arr - count][1])
    settings.set_value("feature_path", out_path + machineName + "_feature.jpg")
    settings.set_value("ent_top_pos_x", x)
    settings.set_value(
        "ent_top_pos_y",
        ent_part_pair[leng_arr - count][0] + origin_height * 0.12)
    # Implicit rule: add back 12% of the original height, because the crop
    # removed the top 12% of the screenshot.
    y = (ent_part_pair[leng_arr - count][0] +
         ent_part_pair[leng_arr - count][1]) / 2 + origin_height * 0.12
    print "finally x = {}, y = {}".format(x, y)
    return x, y
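# A minimal sketch (assumption, not from the original code) of how the returned
# coordinates might be consumed: tap the detected live-room cell through a
# uiautomator Device object, like the `d` used in startAppBySwipe below. The
# serial, screenshot path and machine name are hypothetical.
from uiautomator import Device

d = Device("emulator-5554")                                            # hypothetical serial
x, y = get_ent_pos("./screenrecord/home.jpg", "./screenrecord/", "pixel3")
d.click(int(x), int(y))                                                # tap the centre of the cell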
def get_device_params():
    conf = Config("default.ini")
    event = conf.getconf("serial").serial_number
    serial = event.split(',')
    deviceInfo = DeviceInfo(serial[0])
    # device name -- see how Wei handles this later and drop this step;
    # what I do here is largely redundant.
    device_name = deviceInfo.getDeviceInfo()
    device_name = re.sub(r'\s', '', device_name)
    return device_name
def startAppBySwipe(self, d, times, name):
    conf = Config("default.ini")
    app_name = conf.getconf("default").app_name
    try:
        MLog.info("startAppBySwipe:" + u"try start app ,name = " + app_name)
        bounds = d(text=app_name).info['bounds']
        print bounds
    except Exception, e:
        MLog.info(repr(e))
        app_name = "@" + app_name
        MLog.info(
            u"start_app startAppBySwipe: change app's start name , appname is "
            + app_name)
def get_start_params():
    frame = 50
    firstLaunchTimes = 0
    notFirstLaunchTimes = 0
    enterLiveTimes = 1
    apkName = u"yy.apk"
    package = u"com.duowan.mobile"
    try:
        MLog.info(u"sys_config get_start_params: reading parameters from the config file...")
        conf = Config("default.ini")
        frame = conf.getconf("default").frame
        firstLaunchTimes = conf.getconf("default").first_start
        notFirstLaunchTimes = conf.getconf("default").normal_start
        enterLiveTimes = conf.getconf("default").enter_liveroom
        apkName = conf.getconf("default").apk_name
        package = conf.getconf("default").package
    except Exception:
        MLog.error(u"failed to read the parameters, falling back to defaults")
        frame = 50
        firstLaunchTimes = 1
        notFirstLaunchTimes = 1
        enterLiveTimes = 1
        apkName = u"yy.apk"
        package = u"com.duowan.mobile"
    finally:
        # start_python has to run after init_ffmpeg, otherwise the frame rate is not available yet.
        MLog.info("apkName = " + str(apkName) + " ,first_start = "
                  + str(firstLaunchTimes) + " ,normal_start = " + str(notFirstLaunchTimes)
                  + " ,frame = " + str(frame))
    return int(firstLaunchTimes), int(notFirstLaunchTimes), int(
        enterLiveTimes), str(apkName), str(package)
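# A minimal sketch (assumption, not from the original code) of consuming
# get_start_params(); the tuple order follows the return statement above.
first_times, normal_times, live_times, apk_name, package = get_start_params()
MLog.info("will test first launch {} times, warm launch {} times".format(
    first_times, normal_times))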
def start_calculate(device_name):
    conf_default = Config("default.ini")
    app_key = conf_default.getconf("default").app
    first_launch_result = []
    normal_launch_result = []
    if app_key == "huya" or app_key == "momo":
        first_launch_result = multi_huya_calculate(device_name)
    else:
        first_launch_result = multi_normal_calculate(device_name, "first")
    # To support Huya/Momo here as well, the uiautomator side has to handle the
    # login/skip step manually first.
    normal_launch_result = multi_normal_calculate(device_name, "notfirst")
    # TODO: if the enter-live-room test count is set to 0 this crashes; Yang Fan will fix it later.
    try:
        enter_ent_result = enter_ent_calculate_new(device_name)
    except Exception, e:
        MLog.error(u"crashed while calculating the enter-live-room test, error = ")
        MLog.error(traceback.format_exc())
        enter_ent_result = []
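# A minimal sketch (assumption, not from the original code) of how the pieces
# above might be wired together: resolve the device name from default.ini, then
# run the launch-time calculations for that device.
device_name = get_device_params()   # from get_device_params() above
start_calculate(device_name)        # first/normal launch and enter-live-room calculations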
class BaseConfig(object):
    def __init__(self):
        self.conf = Config("default.ini")
        self.param = None
        print "BaseConfig init"

    def setParams(self, params):
        self.param = params

    def getParms(self):
        return self.param

    def getFirstStartTime(self):
        if self.param:
            return self.param.first_start_times
        else:
            self.firstTime = self.conf.getconf("default").first_start
            return self.firstTime

    def getNormalStartTime(self):
        if self.param:
            return self.param.normal_start_times
        else:
            self.normalTime = self.conf.getconf("default").normal_start
            return self.normalTime

    def getEnterLiveRoom(self):
        if self.param:
            return self.param.enter_liveroom_times
        else:
            self.enterLiveRomm = self.conf.getconf("default").enter_liveroom
            return self.enterLiveRomm

    def getApkName(self):
        self.apkName = self.conf.getconf("default").apk_name
        return self.apkName

    def getAppName(self):
        if self.param:
            return self.param.app_name
        else:
            self.appName = self.conf.getconf("default").app_name
            return self.appName

    def getPackage(self):
        if self.param:
            return self.param.package_name
        else:
            self.packageName = self.conf.getconf("default").package
            return self.packageName

    def getFeaturePath(self):
        self.featurePath = self.conf.getconf("default").feature_path
        return self.featurePath

    def getMethod(self):
        return self.param.install_method
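# A minimal usage sketch (assumption, not from the original code): without
# setParams() the getters fall back to default.ini; after setParams() with an
# object carrying first_start_times etc., the in-memory values take precedence.
base_conf = BaseConfig()
print base_conf.getFirstStartTime()   # read from default.ini
print base_conf.getAppName()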
def multi_huya_calculate_parts(params):
    device_name = params["device"]   # device name
    name_with_suffix = device_name + "_first"
    dir_index = params["dir_index"]  # index of the data set currently being calculated
    # mean RGB of the sample repository; rgb_folder is required by huya_first_find_frame below
    rgb_folder = calculate_homepage_rgb()
    conf = Config("apk.ini")
    conf_default = Config("default.ini")
    app_key = conf_default.getconf("default").app
    feature_dir = conf.getconf(app_key).feature  # folder name of the feature images
    file_count = count_file("./screenrecord/" + name_with_suffix + "/" +
                            name_with_suffix + "_" + str(dir_index))
    real_path = "./screenrecord/" + name_with_suffix + "/" + name_with_suffix + "_" + str(
        dir_index) + "/"
    real_first_feature_path = path + "/picrepos/feature/" + feature_dir + "/" + device_name + "_launch_feature.jpg"
    if not exists(real_first_feature_path):
        MLog.debug(
            "calculate: first, there is no adapted feature pic for current Phone"
        )
        real_first_feature_path = path + "/picrepos/feature/" + feature_dir + "/common_launch_feature.jpg"
    # feature used to detect the intermediate "launching" screen
    real_launching_feature_path = path + "/picrepos/feature/" + feature_dir + "/" + device_name + "_launching_feature.jpg"
    real_last_feature_path = path + "/picrepos/feature/" + feature_dir + "/" + device_name + "_homepage_feature.jpg"
    first = first_frame_find(file_count, real_path, real_first_feature_path)
    # Fetching these images is clumsy; think about how to improve it.
    launching_index, last = huya_first_find_frame(
        file_count, first, real_path, real_launching_feature_path,
        real_last_feature_path, rgb_folder)  # the only line that differs in the Huya calculation
    # frame_value = settings.get_value("ffmpeg")
    frame_value = 50
    total_time = int((last - first + 1) * (1000 / float(frame_value)))
    launching_time = int(
        (launching_index - first + 1) * (1000 / float(frame_value)))
    return dir_index, first, launching_index, last, total_time, launching_time, total_time - launching_time
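# A minimal sketch (assumption, not from the original code) of driving
# multi_huya_calculate_parts() over several recorded runs; run_count and the
# 1-based dir_index are hypothetical and would normally follow the configured
# first-launch count, with device_name from get_device_params().
run_count = 3
results = [multi_huya_calculate_parts({"device": device_name, "dir_index": i})
           for i in range(1, run_count + 1)]
for dir_index, first, launching, last, total_ms, launching_ms, remaining_ms in results:
    MLog.debug("run {}: total = {} ms, launching = {} ms".format(
        dir_index, total_ms, launching_ms))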
def sendEmailWithDefaultConfig():
    user = u"*****@*****.**"
    password = u"lcqctgdcbvklghde"
    to_users = u"[email protected], [email protected],[email protected]"
    conf = Config("default.ini")
    apk_name = conf.getconf("default").apk_name
    conf = Config("default.ini")
    event = conf.getconf("serial").serial_number
    serial = event.split(',')
    deviceInfo = DeviceInfo(serial[0])
    subject = deviceInfo.getDeviceInfo() + apk_name + u" launch-time data analysis"
    content = u"The detailed data analysis is attached:"
    contentType = u"application/octet-stream"
    try:
        log_file = make_log_patch()
        patchFile = []
        MLog.debug(u"sendmail sendEmailWithDefaultConfig: collecting mail attachments:")
        for files in os.walk(chart_data_path):
            for f in files[2]:
                new_file_path = os.path.join(files[0], f)
                patchFile.append(new_file_path)
        patchFile.append(log_file)
        MLog.debug(u"sendmail sendEmailWithDefaultConfig: " + str(patchFile))
    except Exception, e:
        MLog.error(u"sendmail sendEmailWithDefaultConfig: failed to collect attachments! e = " + repr(e))
        patchFile = None
def initConfig(self):
    self.isRunning = False
    conf = Config("default.ini")
    self.sdkPath = conf.getconf("default").sdk_path  # SDK path
    self.sdkPathEntry.delete(0, END)
    self.sdkPathEntry.insert(0, self.sdkPath)
    if self.combox.get() == "自动启动":  # "auto start"
        self.installMethos = 1
    else:
        self.installMethos = 2
    self.videoPath = ""
    self.firstStartTime = conf.getconf("default").first_start  # number of first (cold) launches
    self.firstStartEntry.delete(0, END)
    self.firstStartEntry.insert(0, self.firstStartTime)
    self.normaStartTime = conf.getconf("default").normal_start  # number of normal (warm) launches
    self.normalStartEntry.delete(0, END)
    self.normalStartEntry.insert(0, self.normaStartTime)
    self.enterLiveRoonTime = conf.getconf("default").enter_liveroom  # number of enter-live-room runs
    self.enterLiveRoomEntry.delete(0, END)
    self.enterLiveRoomEntry.insert(0, self.enterLiveRoonTime)
    self.appName = conf.getconf("default").app_name  # app name
    self.appNameEntry.delete(0, END)
    self.appNameEntry.insert(0, self.appName)
    self.packageName = conf.getconf("default").package  # package name
    self.packageNameEntry.delete(0, END)
    self.packageNameEntry.insert(0, self.packageName)
    self.appPath = conf.getconf("default").app_path  # path to the installation package
    self.appPathEntry.delete(0, END)
    self.appPathEntry.insert(0, self.appPath)
    self.features = ""  # feature image
def calculate_repos_rgb():
    conf = Config("apk.ini")
    conf_default = Config("default.ini")
    app_key = conf_default.getconf("default").app
    real_homepage = conf.getconf(app_key).homepage
    # homepage_dir = conf.getconf(real_homepage).feature
    path = feature_path + "/picrepos/homepage/" + real_homepage + "/"
    mean_r = 0
    mean_g = 0
    mean_b = 0
    length_file = base_utils.count_file(feature_path + "/picrepos/homepage/" +
                                        real_homepage)
    for i in range(1, length_file + 1):
        rgb = calculate_pic_rgb(path + base_utils.adapter_num(i) + ".jpg")
        mean_r += rgb[0]
        mean_g += rgb[1]
        mean_b += rgb[2]
    mean_r /= length_file
    mean_g /= length_file
    mean_b /= length_file
    MLog.debug("calculate_repos_rgb: the folder mean rgb: r = {}, g = {}, b = {}".format(
        mean_r, mean_g, mean_b))
    return mean_r, mean_g, mean_b
# coding=utf-8
from PIL import Image

import base_utils
# Compute the mean RGB value of an image.
from config.configs import Config
from log.log import MLog

conf = Config("default.ini")
feature_path = conf.getconf("default").feature_path


def calculate_pic_rgb(path):
    im = Image.open(path)
    pix = im.load()
    width = im.size[0]
    height = im.size[1]
    mean_r = 0
    mean_g = 0
    mean_b = 0
    for x in range(width):
        for y in range(height):
            r, g, b = pix[x, y]
            mean_r += r
            mean_g += g
            mean_b += b
            # print "r = {}, g = {}, b = {}".format(r, g, b)
    mean_r /= (width * height)
    mean_g /= (width * height)
    mean_b /= (width * height)
    return mean_r, mean_g, mean_b
def getApkName():
    conf = Config("default.ini")
    apkName = conf.getconf("default").apk_name
    return apkName
def main():
    # Parse the command line arguments
    prog = argparse.ArgumentParser()
    prog.add_argument('--model', type=str, default='KernelGANFKP',
                      help='KernelGANFKP or KernelGAN.')
    prog.add_argument('--dataset', '-d', type=str, default='DIV2K',
                      help='dataset, e.g., DIV2K.')
    prog.add_argument('--sf', type=str, default='2',
                      help='The wanted SR scale factor, KernelGAN only supports 2 or 4.')
    prog.add_argument('--SR', action='store_true', default=False,
                      help='when activated - nonblind SR is performed')
    prog.add_argument('--real', action='store_true', default=False,
                      help='if the input is real image, to be overwritten automatically.')
    prog.add_argument('--path-KP', type=str,
                      default='../data/pretrained_models/FKP_x2.pt',
                      help='path for trained kernel prior')
    prog.add_argument('--path-nonblind', type=str,
                      default='../data/pretrained_models/usrnet_tiny.pth',
                      help='path for trained nonblind model')

    # to be overwritten automatically
    prog.add_argument('--input-dir', '-i', type=str,
                      default='../data/datasets/DIV2K/KernelGANFKP_lr_x2',
                      help='path to image input directory, to be overwritten automatically.')
    prog.add_argument('--output-dir', '-o', type=str,
                      default='../data/log_KernelGANFKP/DIV2K_KernelGANFKP_lr_x2',
                      help='path to image output directory, to be overwritten automatically.')
    prog.add_argument('--X4', action='store_true', default=False,
                      help='The wanted SR scale factor, to be overwritten automatically.')

    args = prog.parse_args()

    # overwriting paths
    if args.sf == '2':
        args.X4 = False
    elif args.sf == '4':
        args.X4 = True
    else:
        print('KernelGAN-FKP only supports X2 and X4')
        prog.exit(0)
    args.input_dir = '../data/datasets/{}/KernelGANFKP_lr_x{}'.format(
        args.dataset, 4 if args.X4 else 2)
    args.output_dir = '../data/log_KernelGANFKP/{}_{}_lr_x{}'.format(
        args.dataset, args.model, 4 if args.X4 else 2)

    # load nonblind model
    if args.SR:
        netG = USRNet(n_iter=6, h_nc=32, in_nc=4, out_nc=3,
                      nc=[16, 32, 64, 64], nb=2, act_mode="R",
                      downsample_mode='strideconv', upsample_mode="convtranspose")
        netG.load_state_dict(torch.load(args.path_nonblind), strict=True)
        netG.eval()
        for key, v in netG.named_parameters():
            v.requires_grad = False
        netG = netG.cuda()

    filesource = os.listdir(os.path.abspath(args.input_dir))
    filesource.sort()
    for filename in filesource[:]:
        print(filename)

        # kernel estimation
        if args.model == 'KernelGANFKP':
            conf = Config_FKP().parse(create_params(filename, args))
            kernel = train_FKP(conf)
        elif args.model == 'KernelGAN':
            conf = Config().parse(create_params(filename, args))
            kernel = train(conf)

        # nonblind SR
        if args.SR:
            kernel = map2tensor(kernel)
            lr = im2tensor01(read_image(os.path.join(args.input_dir,
                                                     filename))).unsqueeze(0)
            sr = netG(lr, torch.flip(kernel, [2, 3]), 4 if args.X4 else 2,
                      (10 if args.real else 0) / 255 *
                      torch.ones([1, 1, 1, 1]).cuda())
            plt.imsave(os.path.join(conf.output_dir_path, '%s.png' % conf.img_name),
                       tensor2im01(sr), vmin=0, vmax=1., dpi=1)

    if not conf.verbose and args.SR:
        evaluation_dataset(args.input_dir, conf)

    prog.exit(0)
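# Example invocation (a sketch; the script name is assumed, flags follow the
# argparse definitions above, and --SR additionally runs the pretrained USRNet
# non-blind model on the estimated kernel):
#
#   python main.py --model KernelGANFKP -d DIV2K --sf 2 --SR
#
if __name__ == '__main__':
    main()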
def main():
    # Parse the command line arguments
    prog = argparse.ArgumentParser()
    prog.add_argument('--model', type=str, default='DIPFKP',
                      help='models: DIPFKP, DIPSoftmax, DoubleDIP.')
    prog.add_argument('--dataset', '-d', type=str, default='Set5',
                      help='dataset, e.g., Set5.')
    prog.add_argument('--sf', type=str, default='2',
                      help='The wanted SR scale factor')
    prog.add_argument('--path-nonblind', type=str,
                      default='../data/pretrained_models/usrnet_tiny.pth',
                      help='path for trained nonblind model')
    prog.add_argument('--SR', action='store_true', default=False,
                      help='when activated - nonblind SR is performed')
    prog.add_argument('--real', action='store_true', default=False,
                      help='if the input is real image')

    # to be overwritten automatically
    prog.add_argument('--path-KP', type=str,
                      default='../data/pretrained_models/FKP_x2.pt',
                      help='path for trained kernel prior')
    prog.add_argument('--input-dir', '-i', type=str,
                      default='../data/datasets/Set5/DIPFKP_lr_x2',
                      help='path to image input directory.')
    prog.add_argument('--output-dir', '-o', type=str,
                      default='../data/log_KernelGANFKP/Set5_DIPFKP_lr_x2',
                      help='path to image output directory')

    args = prog.parse_args()

    # overwriting paths
    args.path_KP = '../data/pretrained_models/FKP_x{}.pt'.format(args.sf)
    args.input_dir = '../data/datasets/{}/DIPFKP_lr_x{}'.format(args.dataset, args.sf)
    args.output_dir = '../data/log_DIPFKP/{}_{}_lr_x{}'.format(args.dataset, args.model, args.sf)

    # load nonblind model
    if args.SR:
        netG = USRNet(n_iter=6, h_nc=32, in_nc=4, out_nc=3,
                      nc=[16, 32, 64, 64], nb=2, act_mode="R",
                      downsample_mode='strideconv', upsample_mode="convtranspose")
        netG.load_state_dict(torch.load(args.path_nonblind), strict=True)
        netG.eval()
        for key, v in netG.named_parameters():
            v.requires_grad = False
        netG = netG.cuda()

    filesource = os.listdir(os.path.abspath(args.input_dir))
    filesource.sort()
    for filename in filesource[:]:
        print(filename)

        # kernel estimation
        conf = Config().parse(create_params(filename, args))
        lr_image = im2tensor01(read_image(os.path.join(args.input_dir,
                                                       filename))).unsqueeze(0)

        # crop the image to 960x960 due to memory limit
        if 'DIV2K' in args.input_dir:
            crop = int(960 / 2 / conf.sf)
            lr_image = lr_image[:, :,
                                lr_image.shape[2] // 2 - crop: lr_image.shape[2] // 2 + crop,
                                lr_image.shape[3] // 2 - crop: lr_image.shape[3] // 2 + crop]

        kernel, sr_dip = train(conf, lr_image)
        plt.imsave(os.path.join(conf.output_dir_path, '%s.png' % conf.img_name),
                   tensor2im01(sr_dip), vmin=0, vmax=1., dpi=1)

        # nonblind SR
        if args.SR:
            kernel = map2tensor(kernel)
            sr = netG(lr_image, torch.flip(kernel, [2, 3]), int(args.sf),
                      (10 if args.real else 0) / 255 *
                      torch.ones([1, 1, 1, 1]).cuda())
            plt.imsave(os.path.join(conf.output_dir_path, '%s.png' % conf.img_name),
                       tensor2im01(sr), vmin=0, vmax=1., dpi=1)

    if not conf.verbose:
        evaluation_dataset(args.input_dir, conf)

    prog.exit(0)
import os

try:
    from config.configs import Config
except ImportError:
    from config.default import DefaultConfig as Config

config = Config()
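# A hedged sketch (assumption, not taken from the original project) of the keys
# that default.ini appears to expose, judging from the getconf() calls in the
# snippets above; section and key names follow those calls, the concrete values
# are illustrative only.
#
#   [default]
#   app = huya
#   app_name = HuyaLive
#   apk_name = yy.apk
#   package = com.duowan.mobile
#   frame = 50
#   first_start = 1
#   normal_start = 1
#   enter_liveroom = 1
#   sdk_path = /path/to/android-sdk
#   app_path = /path/to/apks
#   feature_path = .
#
#   [serial]
#   serial_number = emulator-5554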