Example #1
    def servant_scroll(self, line):
        """Scroll the servant list by `line` steps: a centre drag for small
        values, otherwise a drag of the scrollbar thumb itself."""
        if not self.parse_scene_condition(["$all", [["滚动条-上"], ["滚动条-下"]]]):
            self.notice("Can't Scroll")
            return

        # Small scroll: drag from the screen centre, half a screen height
        # per requested line.
        if abs(line) < 3:
            mid_x = config.getint("Device", "MainWidth") / 2
            mid_y = config.getint("Device", "MainHeight") / 2
            rand_drag(
                self.hwnd,
                rand_point([mid_x, mid_y], [50, 10]),
                rand_point([mid_x, mid_y - mid_y * line * 0.5], [50, 10]),
                30,
            )
            return
        # Larger scroll: locate the scrollbar endpoints and drag the thumb
        # between them.
        _, top_xy = self.search_resource("滚动条-上")
        _, bot_xy = self.search_resource("滚动条-下")
        width = self.resources["滚动条-上"]["Size"][0]
        top = top_xy[1]
        bottom = bot_xy[1]
        x = top_xy[0] + width / 2
        middle = (top + bottom) // 2
        cross = bottom - top
        dy = line * cross * 0.37
        rand_drag(
            self.hwnd,
            rand_point([x, middle], [width / 6, cross / 6]),
            rand_point([x, middle + dy], [50, 10]),
            30,
        )
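rand_point and rand_drag are helpers defined elsewhere in the project; below is a hypothetical sketch of rand_point only, assuming it jitters a centre point within a per-axis deviation so repeated drags never start from the exact same pixel:

import random

def rand_point(center, deviation):
    # Hypothetical sketch (not the project's implementation): jitter
    # `center` uniformly by up to `deviation` on each axis.
    return (
        int(round(center[0] + random.uniform(-deviation[0], deviation[0]))),
        int(round(center[1] + random.uniform(-deviation[1], deviation[1]))),
    )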
Example #2
def update_resource(resource, section, name=None):
    """根据当前配置更新资源属性, 加载图片并进行缩放"""
    dw = config.getint("Device", "MainWidth")
    dh = config.getint("Device", "MainHeight")
    sdw, sdh = resource["MainSize"]
    if dw != sdw or dh != sdh:
        for key in resource:
            if re.search("Offset|Position|Size", key):
                resource[key] = rescale_item(resource[key], dw / sdw, dh / sdh)
    if (resource.get("Image") is not None
            or resource.get("ImageData") is not None):
        load_image(resource, section)
    if name is not None and "Name" not in resource:
        resource["Name"] = name
Example #3
def get_window_shot(hwnd):
    # Capture a screenshot of a background application. The window may be
    # covered by other windows, but once minimized only the title bar,
    # menu bar, etc. can be captured.

    # Use the custom window edge offsets and capture size from the config
    dx = config.getint("Device", "EdgeOffsetX")
    dy = config.getint("Device", "EdgeOffsetY")
    w = config.getint("Device", "MainWidth")
    h = config.getint("Device", "MainHeight")
    window_w, window_h = detect_window_size(hwnd)
    if dx + w > window_w or dy + h > window_h:
        raise ValueError("截图区域超出窗口! 请检查配置文件")
    # logger.debug("截图: %dx%d at %dx%d", w, h, dx, dy)

    # Get the device context of the window handle; it covers the whole
    # window, including the non-client area (title bar, menus, borders)
    hwndDC = win32gui.GetWindowDC(hwnd)
    # Create a DC object from the window's DC handle
    mfcDC = win32ui.CreateDCFromHandle(hwndDC)
    # Create a compatible memory DC
    saveDC = mfcDC.CreateCompatibleDC()
    # Create a bitmap object compatible with the window DC
    saveBitMap = win32ui.CreateBitmap()
    saveBitMap.CreateCompatibleBitmap(mfcDC, w, h)
    saveDC.SelectObject(saveBitMap)
    # BitBlt the capture region into the memory DC
    saveDC.BitBlt((0, 0), (w, h), mfcDC, (dx, dy), win32con.SRCCOPY)
    # Read the bitmap header and raw pixel bits
    bmpinfo = saveBitMap.GetInfo()
    bmpdata = saveBitMap.GetBitmapBits(True)
    # Build the image array (BGRA -> BGR)
    image_data = np.frombuffer(bmpdata, "uint8")
    image_data = image_data.reshape(
        (bmpinfo["bmHeight"], bmpinfo["bmWidth"], 4))
    image_data = cv.cvtColor(image_data, cv.COLOR_BGRA2BGR)
    # Release GDI resources
    win32gui.DeleteObject(saveBitMap.GetHandle())
    saveDC.DeleteDC()
    mfcDC.DeleteDC()
    win32gui.ReleaseDC(hwnd, hwndDC)

    return image_data
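A minimal usage sketch, assuming the target window is found by its title (the title string and output path are placeholders):

import cv2 as cv
import win32gui

hwnd = win32gui.FindWindow(None, "Window Title")  # placeholder title
if hwnd:
    shot = get_window_shot(hwnd)
    cv.imwrite("shot.png", shot)  # BGR array, ready for template matching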
Example #4
    def __init__(
        self,
        train: bool = True,
        val: bool = False,
        transform: Optional[object] = None,
        target_transform: Optional[object] = None,
        debug_with_few_samples: bool = False,
        create_standardization_transform: bool = False,
    ) -> None:

        if train and val:
            raise ValueError(
                '"train" and "val" cannot be true at the same time!')

        min_year = config.getint('DataOptions', 'min_year')
        max_year = config.getint('DataOptions', 'max_year')
        max_train_year = config.getint('DataOptions', 'max_train_year')
        max_val_year = config.getint('DataOptions', 'max_val_year')
        remo_input_dir = config.get('Paths', 'remo_input')
        remo_target_dir = config.get('Paths', 'remo_target')
        elev_file = config.get('Paths', 'elevation')
        input_var = config.get('DataOptions', 'input_variable')
        target_var = config.get('DataOptions', 'target_variable')
        aux_base_path = config.get('Paths', 'aux_base_path')
        # The filter removes empty strings in the resulting list, which occur when there are no aux_variables specified
        aux_vars = list(
            filter(None,
                   config.get('DataOptions', 'aux_variables').split(',')))

        aux_vars = [join(aux_base_path, p) for p in aux_vars]

        if train:
            self.years = list(range(min_year, max_train_year + 1))
            print('Train years', self.years)
            self.mode = 'train'

        elif val:
            self.years = list(range(max_train_year + 1, max_val_year + 1))
            print('Validation years', self.years)
            self.mode = 'val'

        else:
            self.years = list(range(max_val_year + 1, max_year + 1))
            print('Test years', self.years)
            self.mode = 'test'

        self.transform = transform
        self.target_transform = target_transform

        # For testing purposes
        # self.years = [2000]

        self.dataset = RemoSuperRes(
            remo_input_dir,
            remo_target_dir,
            self.years,
            elev_file,
            input_var=input_var,
            target_var=target_var,
            aux_features=aux_vars,
        )
        # This name is misleading - it does not necessarily create test data
        (
            self.X,
            self.aux,
            self.elev_arr,
            self.Y,
            self.lats,
            self.lons,
            self.times,
        ) = self.dataset.make_test()

        if debug_with_few_samples:
            num_debug_samples = 16
            print(f'DEBUG: Using only {num_debug_samples} samples')
            self.X = self.X[:num_debug_samples]
            if self.aux is not None:
                self.aux = self.aux[:num_debug_samples]
            self.Y = self.Y[:num_debug_samples]
            self.times = self.times[:num_debug_samples]

        # Convert E-OBS temperature from Celsius to Kelvin
        if target_var == 'tg':
            self.Y += 273.15

        if create_standardization_transform:
            self.standardize_transform = self.calculate_standardization_transform()
            if self.transform is not None:
                self.transform = Compose(
                    [self.transform, self.standardize_transform])
            else:
                self.transform = self.standardize_transform
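A minimal usage sketch, assuming the class above (its name is not in the excerpt; RemoDataset below is a placeholder) also implements the usual __len__/__getitem__ protocol of a torch Dataset:

from torch.utils.data import DataLoader

train_ds = RemoDataset(train=True, val=False)  # placeholder class name
train_loader = DataLoader(train_ds, batch_size=64, shuffle=True)
for batch in train_loader:  # item layout depends on __getitem__, not shown
    pass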
Example #5
def rescale_point(hwnd, point):
    # Add the configured edge offsets, then convert from physical pixels
    # to DPI-scaled logical coordinates (96 DPI = 100 % scaling).
    dpi = ctypes.windll.user32.GetDpiForWindow(hwnd)
    x = point[0] + config.getint("Device", "EdgeOffsetX")
    y = point[1] + config.getint("Device", "EdgeOffsetY")
    # logger.debug("Rescale: %s -> %s", point, (x, y))
    return int(np.round(x * 96 / dpi)), int(np.round(y * 96 / dpi))
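As a worked example of the DPI math: at 150 % display scaling, GetDpiForWindow returns 144, so with zero edge offsets a client point (300, 200) maps to (round(300 * 96 / 144), round(200 * 96 / 144)) = (200, 133).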
Example #6
def export(lr: float, model_name: str, save_dir: str):

    models_available = {
        'globalnet': GlobalNet,
        'localnet': LocalNet,
        'convmos': ConvMOS,
    }

    model = models_available[model_name]()
    device = 'cpu'

    if torch.cuda.is_available():
        device = 'cuda'

    model = model.to(device=device)

    # E-OBS only provides observational data for land so we need to use a mask to avoid fitting on the sea
    land_mask_np = np.load('remo_eobs_land_mask.npy')
    # Convert booleans to 1 and 0, and convert numpy array to torch Tensor
    land_mask = torch.from_numpy(1 * land_mask_np).to(device)
    loss_fn = partial(masked_mse_loss, mask=land_mask)

    optimizer = Adam(model.parameters(), lr=lr)
    trainer = create_supervised_trainer(model,
                                        optimizer,
                                        loss_fn,
                                        device=device)

    to_save = {'model': model, 'optimizer': optimizer, 'trainer': trainer}

    checkpoint_files = glob(join(save_dir, 'best_checkpoint_*.pt'))
    if len(checkpoint_files) > 0:
        # Parse something like:
        # /scratch/scratch_remo/APRL-rr-11-11-sdnext-loglonet-prec-ger11-maskedloss-7/best_checkpoint_194_val_loss=-11.8250.pt
        # Sorry
        epoch_to_score = {
            int(c.split(sep)[-1].split('_')[2]):
            float(c.split(sep)[-1].split('=')[-1][:-3])
            for c in checkpoint_files
        }
        print(epoch_to_score)
        best_epoch = max(epoch_to_score, key=epoch_to_score.get)
        best_checkpoint_file = next(
            cf for cf in checkpoint_files
            if int(cf.split(sep)[-1].split('_')[2]) == best_epoch)
        print('Loading best checkpoint', best_checkpoint_file)

        checkpoint = torch.load(best_checkpoint_file, map_location=device)
        Checkpoint.load_objects(to_load=to_save, checkpoint=checkpoint)
    else:
        print(
            'ERROR: cannot find any files matching',
            join(save_dir, 'best_checkpoint_*.pt'),
        )
        return

    # This uses all aux variables, the temperature/precipitation, and elevation
    n_aux = len(
        list(filter(None,
                    config.get('DataOptions', 'aux_variables').split(','))))
    input_depth = n_aux + 2
    input_width = config.getint('NN', 'input_width')
    input_height = config.getint('NN', 'input_height')

    dummy_input = torch.randn(1,
                              input_depth,
                              input_width,
                              input_height,
                              device=device)

    torch.onnx.export(
        model,
        dummy_input,
        join(save_dir, 'convmos.onnx'),
        verbose=True,
        input_names=['input'],
        output_names=['output'],
    )
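A minimal sketch of running the exported model with ONNX Runtime (assumes the onnxruntime package; the shape constants below are placeholders, substitute the input_depth/input_width/input_height computed in export() above):

import numpy as np
import onnxruntime as ort

# Placeholder shape; use the values computed in export() above.
depth, width, height = 3, 32, 32

sess = ort.InferenceSession('convmos.onnx')
x = np.random.randn(1, depth, width, height).astype(np.float32)
(pred,) = sess.run(['output'], {'input': x})
print(pred.shape)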
Example #7
        # directory already exists
        pass
    val_preds.to_netcdf(join(save_dir, 'val_predictions.nc'))
    test_preds.to_netcdf(join(save_dir, 'test_predictions.nc'))


if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument('--num_jobs',
                        type=int,
                        default=1,
                        help='Number of jobs (default: 1)')
    parser.add_argument(
        '--non_local_dist',
        type=int,
        default=config.getint('NN', 'non_local_dist'),
        help='Max cell distance to consider (Only relevant in non_local mode)',
    )
    parser.add_argument(
        '--n_components',
        type=float,
        default=config.getfloat('NN', 'n_components'),
        help='Number of components to retain for PCA (Only relevant in non_local mode)',
    )
    parser.add_argument(
        '--n_param_sets',
        type=int,
        default=config.getint('NN', 'n_param_sets'),
        help='Number of parameter sets tried per cell in the hp search',
    )
Example #8
        {f'val_torch_{k}': v
         for k, v in val_evaluator.state.metrics.items()})
    # ... the validation metrics that I calculate,
    results.update({f'val_{k}': v for k, v in val_res.items()})
    # ... and the test metrics that I calculate
    results.update({f'test_{k}': v for k, v in test_res.items()})
    write_results_file(join('results', 'results.json'),
                       pd.json_normalize(results))


if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument(
        '--batch_size',
        type=int,
        default=config.getint('NN', 'batch_size'),
        help='input batch size for training (default: from config)',
    )
    parser.add_argument(
        '--val_batch_size',
        type=int,
        default=config.getint('NN', 'batch_size'),
        help='input batch size for validation (default: from config)',
    )
    parser.add_argument(
        '--epochs',
        type=int,
        default=config.getint('NN', 'training_epochs'),
        help='number of epochs to train (default: from config)',
    )
    parser.add_argument(