Example #1
def calibrate_sigw1_metric(config, x_future, x_past):
    sigs_past = config.compute_sig_past(x_past)
    sigs_future = config.compute_sig_future(x_future)
    assert sigs_past.size(0) == sigs_future.size(0)
    X, Y = to_numpy(sigs_past), to_numpy(sigs_future)
    lm = LinearRegression()
    lm.fit(X, Y)
    sigs_pred = torch.from_numpy(lm.predict(X)).float().to(x_future.device)
    return sigs_pred
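Every example on this page relies on a `to_numpy` helper defined by the host project. A minimal sketch of what such a helper typically looks like (assuming it only needs to detach a torch tensor and move it to the CPU; the projects' real versions may handle more input types):

import numpy as np
import torch

def to_numpy(x):
    # Detach from the autograd graph, move to CPU, and convert to a numpy array.
    if isinstance(x, torch.Tensor):
        return x.detach().cpu().numpy()
    return np.asarray(x)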
Example #2
def compute_predictive_score(x_past, x_future, x_fake):
    size = x_fake.shape[0]
    # Train-on-synthetic features/targets: flatten the past windows and the
    # synthetic one-step-ahead samples.
    X = to_numpy(x_past.reshape(size, -1))
    Y = to_numpy(x_fake.reshape(size, -1))
    size = x_past.shape[0]
    # Test-on-real target: the first step of the real future path.
    X_test = X.copy()
    Y_test = to_numpy(x_future[:, :1].reshape(size, -1))
    model = LinearRegression()
    model.fit(X, Y)  # TSTR: train on synthetic, test on real
    r2_tstr = model.score(X_test, Y_test)
    model = LinearRegression()
    model.fit(X_test, Y_test)  # TRTR: train on real, test on real
    r2_trtr = model.score(X_test, Y_test)
    return dict(r2_tstr=r2_tstr, r2_trtr=r2_trtr, predictive_score=np.abs(r2_trtr - r2_tstr))
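A hedged usage sketch for compute_predictive_score with random tensors, only to illustrate the expected shapes (the sizes below are illustrative assumptions, and the snippet's own imports of LinearRegression and numpy are assumed to be in scope). Note that x_fake must be a one-step-ahead sample so that its flattened width matches x_future[:, :1]:

import torch

x_past = torch.randn(128, 10, 3)   # 128 windows, 10 past steps, 3 features
x_future = torch.randn(128, 5, 3)  # 5 future steps
x_fake = torch.randn(128, 1, 3)    # one-step-ahead synthetic sample
scores = compute_predictive_score(x_past, x_future, x_fake)
print(scores['predictive_score'])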
Example #3
    def _val_single_step(self, batch):
        """


        """
        start_step = time.time()

        self.model.eval()

        frontal = to_device(batch['frontal'], self.device)
        lateral = to_device(batch['lateral'], self.device)
        labels = to_device(batch['labels'], self.device)
        mask = to_device(batch['mask'], self.device)

        start_forward = time.time()
        logits = self.model(frontal, lateral)
        loss = self.criterion(logits, labels, mask)
        end_forward = time.time()

        mask = to_numpy(batch['mask'])
        labels = to_numpy(batch['labels'].long())
        scores = to_numpy(torch.sigmoid(logits))

        self.pr_meter.add_predictions(mask, scores, labels)
        self.auc_meter.add_predictions(mask, scores, labels)

        results = []
        if self.save_predictions:
            for i in range(len(labels)):
                results.append({
                    'patient': batch['patient'][i],
                    'study_id': batch['study_id'][i],
                    'frontal': batch['frontal_fn'][i],
                    'lateral': batch['lateral_fn'][i],
                    'labels': labels[i],
                    'mask': mask[i],
                    'scores': scores[i],
                })

        end_step = time.time()

        meta = {}
        meta['val_time'] = end_step - start_step
        meta['forward_time'] = end_forward - start_forward
        meta['learning_rate'] = self.optimizer.param_groups[-1]['lr']
        meta['memory_allocated'] = torch.cuda.memory_allocated()

        metrics = {}
        metrics['loss'] = loss.item()
        return {'meta': meta, 'metrics': metrics, 'results': results}
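The `to_device` helper above is project code; a minimal sketch under the assumption that it simply moves tensors to the target device and passes everything else through:

import torch

def to_device(x, device):
    # Move tensors to the device; leave non-tensor batch entries untouched.
    if isinstance(x, torch.Tensor):
        return x.to(device, non_blocking=True)
    return x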
Example #4
def test():
    # lmdb_path = "/share/zhui/reg_dataset/NIPS2014"
    lmdb_path = "/share/zhui/reg_dataset/IIIT5K_3000"
    train_dataset = LmdbDataset(root=lmdb_path,
                                voc_type='ALLCASES_SYMBOLS',
                                max_len=50)
    batch_size = 1
    train_dataloader = data.DataLoader(
        train_dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=4,
        collate_fn=AlignCollate(imgH=64, imgW=256, keep_ratio=False))

    for i, (images, labels, label_lens) in enumerate(train_dataloader):
        # visualization of input image
        # toPILImage = transforms.ToPILImage()
        images = images.permute(0, 2, 3, 1)
        images = to_numpy(images)
        images = images * 0.5 + 0.5
        images = images * 255
        for image, label, label_len in zip(images, labels, label_lens):
            image = Image.fromarray(np.uint8(image))
            # image = toPILImage(image)
            image.show()
            print(image.size)
            print(
                labels2strs(label, train_dataset.id2char,
                            train_dataset.char2id))
            print(label_len.item())
            input()
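`labels2strs` comes from the dataset utilities; a rough sketch of the usual decoding logic, assuming an 'EOS' entry in the vocabulary marks the end of each label (the project's real helper may differ):

def labels2strs(labels, id2char, char2id):
    # Map each id back to its character, stopping at the assumed EOS marker.
    chars = []
    for idx in to_numpy(labels).tolist():
        char = id2char[int(idx)]
        if char == 'EOS':  # assumed end-of-sequence token
            break
        chars.append(char)
    return ''.join(chars)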
Example #5
def compare_cross_corr(x_real, x_fake):
    """ Computes cross correlation matrices of x_real and x_fake and plots them. """
    x_real = x_real.reshape(-1, x_real.shape[2])
    x_fake = x_fake.reshape(-1, x_fake.shape[2])
    cc_real = np.corrcoef(to_numpy(x_real).T)
    cc_fake = np.corrcoef(to_numpy(x_fake).T)

    vmin = min(cc_fake.min(), cc_real.min())
    vmax = max(cc_fake.max(), cc_real.max())

    fig, axes = plt.subplots(1, 2)
    axes[0].matshow(cc_real, vmin=vmin, vmax=vmax)
    im = axes[1].matshow(cc_fake, vmin=vmin, vmax=vmax)

    axes[0].set_title('Real')
    axes[1].set_title('Generated')
    fig.subplots_adjust(right=0.8)
    cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
    fig.colorbar(im, cax=cbar_ax)
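A quick smoke test with random data (the batch x time x features shapes are illustrative assumptions):

import torch
import matplotlib.pyplot as plt

x_real = torch.randn(64, 20, 3)
x_fake = torch.randn(64, 20, 3)
compare_cross_corr(x_real=x_real, x_fake=x_fake)
plt.show()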
Example #6
File: agent.py  Project: wtsitp/AMC
    def select_action(self, s_t, episode):
        # assert episode >= self.warmup, 'Episode: {} warmup: {}'.format(episode, self.warmup)
        action = to_numpy(self.actor(to_tensor(np.array(s_t).reshape(
            1, -1)))).squeeze(0)
        delta = self.init_delta * (self.delta_decay**(episode - self.warmup))
        # action += self.is_training * max(self.epsilon, 0) * self.random_process.sample()
        action = self.sample_from_truncated_normal_distribution(
            lower=self.lbound, upper=self.rbound, mu=action, sigma=delta)
        action = np.clip(action, self.lbound, self.rbound)

        # self.a_t = action
        return action
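`sample_from_truncated_normal_distribution` is defined elsewhere in the AMC agent; a plausible sketch built on scipy.stats.truncnorm (the a/b rescaling is scipy's standard convention, but the exact signature used by the project is an assumption here):

import numpy as np
from scipy import stats

def sample_from_truncated_normal_distribution(lower, upper, mu, sigma, size=None):
    # truncnorm expresses its bounds in standard-deviation units around loc.
    a = (lower - mu) / sigma
    b = (upper - mu) / sigma
    return stats.truncnorm.rvs(a, b, loc=mu, scale=sigma, size=size)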
Example #7
def plot_summary(x_fake, x_real, max_lag=None, labels=None):
    if max_lag is None:
        max_lag = min(128, x_fake.shape[1])

    from lib.test_metrics import skew_torch, kurtosis_torch
    dim = x_real.shape[2]
    _, axes = plt.subplots(dim, 3, figsize=(25, dim * 5))

    if len(axes.shape) == 1:
        axes = axes[None, ...]
    for i in range(dim):
        x_real_i = x_real[..., i:i + 1]
        x_fake_i = x_fake[..., i:i + 1]

        compare_hists(x_real=to_numpy(x_real_i), x_fake=to_numpy(x_fake_i), ax=axes[i, 0])

        def text_box(x, height, title):
            textstr = '\n'.join((
                r'%s' % (title,),
                # t'abs_metric=%.2f' % abs_metric
                r'$s=%.2f$' % (skew_torch(x).item(),),
                r'$\kappa=%.2f$' % (kurtosis_torch(x).item(),))
            )
            props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
            axes[i, 0].text(
                0.05, height, textstr,
                transform=axes[i, 0].transAxes,
                fontsize=14,
                verticalalignment='top',
                bbox=props
            )

        text_box(x_real_i, 0.95, 'Historical')
        text_box(x_fake_i, 0.70, 'Generated')

        compare_hists(x_real=to_numpy(x_real_i), x_fake=to_numpy(x_fake_i), ax=axes[i, 1], log=True)
        compare_acf(x_real=x_real_i, x_fake=x_fake_i, ax=axes[i, 2], max_lag=max_lag, CI=False, dim=(0, 1))
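`skew_torch` and `kurtosis_torch` are imported from `lib.test_metrics`; minimal sketches of the standard moment formulas they presumably implement (whether the library reports excess kurtosis, as below, is an assumption):

import torch

def skew_torch(x):
    # Sample skewness: E[(x - mu)^3] / sigma^3.
    x = x - x.mean()
    return (x ** 3).mean() / (x.var(unbiased=False) ** 1.5)

def kurtosis_torch(x):
    # Excess kurtosis: E[(x - mu)^4] / sigma^4 - 3.
    x = x - x.mean()
    return (x ** 4).mean() / (x.var(unbiased=False) ** 2) - 3.0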
Example #8
    def select_action(self, s_t, episode):
        # assert episode >= self.warmup, 'Episode: {} warmup: {}'.format(episode, self.warmup)
        # squeeze() removes single-dimension entries, i.e. drops axes whose size is 1
        action = to_numpy(self.actor(to_tensor(np.array(s_t).reshape(1, -1)))).squeeze(0)

        # Compute the current delta via the decay rate (default 0.95); it serves as the sigma of the truncated normal below
        delta = self.init_delta * (self.delta_decay ** (episode - self.warmup))
        # action += self.is_training * max(self.epsilon, 0) * self.random_process.sample()
        # Sample from a truncated normal centred at action with std delta, which adds some exploration randomness
        action = self.sample_from_truncated_normal_distribution(lower=self.lbound, upper=self.rbound, mu=action, sigma=delta)
        # Arguably redundant, since the truncated sampling above already enforces the bounds
        action = np.clip(action, self.lbound, self.rbound)

        # self.a_t = action
        return action
Example #9
    def evaluate(self, x_fake):
        # Evaluate each test metric on (at most) the first 10k fake samples and
        # log the component-wise loss.
        for test_metric in self.test_metrics_list:
            with torch.no_grad():
                test_metric(x_fake[:10000])
            self.training_loss[test_metric.name].append(
                to_numpy(test_metric.loss_componentwise))
Example #10
def test():
  train_dataset = LmdbDataset(
    root=global_args.test_data_dir,
    voc_type=global_args.voc_type,
    max_len=global_args.max_len,
    num_samples=global_args.num_test,
    with_name=True)
  print(train_dataset.nSamples, 'samples')
  train_dataloader = data.DataLoader(
        train_dataset,
        batch_size=1,
        shuffle=False,
        num_workers=global_args.workers,
        collate_fn=AlignCollateWithNames(imgH=64, imgW=256, keep_ratio=False))

  if global_args.image_path:
    out_html = open(os.path.join(global_args.image_path, 'index.html'), 'w')
    out_html.write('''<html>
<body>
<table>
<tr><th>No</th><th>Image</th><th>Labels</th><th>Length</th><th>Name</th></tr>
''')
  else:
    out_html = None

  i = 1
  max_len = 0
  for images, labels, label_lens, image_names in train_dataloader:
    # visualization of input image
    # toPILImage = transforms.ToPILImage()
    images = images.permute(0,2,3,1)
    images = to_numpy(images)
    images = images * 0.5 + 0.5
    images = images * 255
    for image, label, label_len, image_name in zip(images, labels, label_lens, image_names):
      image = Image.fromarray(np.uint8(image))
      label_str = labels2strs(label, train_dataset.id2char, train_dataset.char2id)
      if image_name is not None:
        image_name = image_name.decode('utf-8')
      else:
        image_name = ''
      l_len = label_len.item()
      if max_len < l_len:
        max_len = l_len

      if global_args.image_path:
        image_filename = f'image-{i:09d}.jpg'
        image.save(os.path.join(global_args.image_path, image_filename))
        out_html.write(
          f'<tr><td>{i}</td>'
          f'<td><img src="{image_filename}" width="{image.width}" height="{image.height}" /></td>'
          f'<td>{label_str}</td><td>{l_len}</td><td>{image_name}</td></tr>\n')
      else:
        image.show()
        print(image.size)
        print(label_str, l_len)
        if image_name:
          print(image_name)
        input()
      i += 1

  if out_html:
    out_html.write(f'</table>\n<p>The maximal label length is {max_len}.</p>\n</body>\n</html>\n')
    out_html.close()
Example #11
def evaluate_generator(model_name, seed, experiment_dir, dataset, use_cuda=True):
    """Evaluate a trained generator and assemble an experiment summary.

    Args:
        model_name: identifier of the evaluated model.
        seed: random seed of the training run being evaluated.
        experiment_dir: directory containing the trained generator weights.
        dataset: dataset name, e.g. 'VAR', 'ARCH', 'STOCKS' or 'ECG'.
        use_cuda: whether to run the evaluation on the GPU.

    Returns:
        A dict with predictive scores, test metrics and the Sig-W1 metric.
    """
    torch.random.manual_seed(0)
    if use_cuda:
        device = 'cuda'
    else:
        device = 'cpu'
    experiment_summary = dict()
    experiment_summary['model_id'] = model_name
    experiment_summary['seed'] = seed

    sig_config = get_algo_config(dataset, experiment_dir)
    # shorthands
    base_config = BaseConfig(device=device)
    p, q = base_config.p, base_config.q
    # ----------------------------------------------
    # Load and prepare real path.
    # ----------------------------------------------
    x_real = load_pickle(os.path.join(os.path.dirname(experiment_dir), 'x_real.torch')).to(device)
    x_past, x_future = x_real[:, :p], x_real[:, p:p + q]
    dim = x_real.shape[-1]
    # ----------------------------------------------
    # Load generator weights and hyperparameters
    # ----------------------------------------------
    G_weights = load_pickle(os.path.join(experiment_dir, 'G_weights.torch'))
    G = SimpleGenerator(dim * p, dim, 3 * (50,), dim).to(device)
    G.load_state_dict(G_weights)
    # ----------------------------------------------
    # Compute predictive score - TSTR (train on synthetic, test on real)
    # ----------------------------------------------
    with torch.no_grad():
        x_fake = G.sample(1, x_past)
    predict_score_dict = compute_predictive_score(x_past, x_future, x_fake)
    experiment_summary.update(predict_score_dict)
    # ----------------------------------------------
    # Compute metrics and scores of the unconditional distribution.
    # ----------------------------------------------
    with torch.no_grad():
        x_fake = G.sample(q, x_past)
    test_metrics_dict = compute_test_metrics(x_fake, x_real)
    experiment_summary.update(test_metrics_dict)
    # ----------------------------------------------
    # Compute Sig-W_1 distance.
    # ----------------------------------------------
    if dataset in ['VAR', 'ARCH']:
        x_past = x_past[::10]
        x_future = x_future[::10]
    sigs_pred = calibrate_sigw1_metric(sig_config, x_future, x_past)
    # generate fake paths
    sigs_conditional = list()
    with torch.no_grad():
        steps = 100
        size = x_past.size(0) // steps
        for i in range(steps):
            x_past_sample = x_past[i * size:(i + 1) * size] if i < (steps - 1) else x_past[i * size:]
            sigs_fake_ce = sample_sig_fake(G, q, sig_config, x_past_sample)[0]
            sigs_conditional.append(sigs_fake_ce)
        sigs_conditional = torch.cat(sigs_conditional, dim=0)
        sig_w1_metric = sigcwgan_loss(sigs_pred, sigs_conditional)
    experiment_summary['sig_w1_metric'] = sig_w1_metric.item()
    # ----------------------------------------------
    # Create the relevant summary plots.
    # ----------------------------------------------
    with torch.no_grad():
        _x_past = x_past.clone().repeat(5, 1, 1) if dataset in ['STOCKS', 'ECG'] else x_past.clone()
        x_fake_future = G.sample(q, _x_past)
        plot_summary(x_fake=x_fake_future, x_real=x_real, max_lag=q)
    plt.savefig(os.path.join(experiment_dir, 'summary.png'))
    plt.close()
    if is_multivariate(x_real):
        compare_cross_corr(x_fake=x_fake_future, x_real=x_real)
        plt.savefig(os.path.join(experiment_dir, 'cross_correl.png'))
        plt.close()
    # ----------------------------------------------
    # Generate long paths
    # ----------------------------------------------
    with torch.no_grad():
        x_fake = G.sample(8000, x_past[0:1])
    plot_summary(x_fake=x_fake, x_real=x_real, max_lag=q)
    plt.savefig(os.path.join(experiment_dir, 'summary_long.png'))
    plt.close()
    plt.plot(to_numpy(x_fake[0, :1000]))
    plt.savefig(os.path.join(experiment_dir, 'long_path.png'))
    plt.close()
    return experiment_summary
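A hedged invocation sketch; the model id, path, and dataset name below are placeholders rather than values from the source:

import torch

summary = evaluate_generator(
    model_name='SigCWGAN',                               # hypothetical model id
    seed=0,
    experiment_dir='./experiments/VAR/seed=0/SigCWGAN',  # placeholder path
    dataset='VAR',
    use_cuda=torch.cuda.is_available(),
)
print(summary['sig_w1_metric'], summary['predictive_score'])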
Example #12
def plot_signature(signature_tensor, alpha=0.2):
    plt.plot(to_numpy(signature_tensor).T, alpha=alpha, linestyle='None', marker='o')
    plt.grid()
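Usage sketch with a random "signature" tensor (the batch x signature-terms shape is an illustrative assumption):

import torch
import matplotlib.pyplot as plt

sigs = torch.randn(32, 64)  # hypothetical batch of truncated signatures
plot_signature(sigs, alpha=0.3)
plt.show()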