Example #1
0
def evaluate(args, epoch, model, data_loader):
    """Run one no-grad evaluation pass and report loss, PSNR, and timing.

    Args:
        args: dict-like config; reads 'rate' (masking rate) and 'device'.
        epoch: current epoch index (unused here; kept for interface parity
            with train_epoch).
        model: network called as model(input, mask), returning a
            reconstruction the same shape as target.
        data_loader: iterable yielding (target, _) batches.

    Returns:
        Tuple of (mean per-batch summed MSE, mean PSNR, elapsed seconds).
    """
    model.eval()
    losses = []
    start_epoch = time.time()
    psnr_tot = []
    with torch.no_grad():
        # `batch_idx` / `masked_input` avoid shadowing builtins `iter`/`input`.
        for batch_idx, data in enumerate(data_loader):
            target, _ = data
            masked_input, mask = apply_random_mask(target, args['rate'])
            # NOTE(review): torch.tensor() always copies; if these are already
            # tensors, torch.as_tensor would avoid the copy — confirm what
            # data_loader / apply_random_mask yield before changing.
            target = torch.tensor(target).to(args['device'])
            masked_input = torch.tensor(masked_input).to(args['device'])
            mask = torch.tensor(mask).to(args['device'])
            output = model(masked_input, mask)

            # Summed (not averaged) MSE per batch; the np.mean below therefore
            # weights batches equally regardless of their size.
            loss = F.mse_loss(output, target, reduction='sum')
            losses.append(loss.item())
            # Per-sample PSNR, averaged within the batch.
            psnr_tot.append(
                np.mean([
                    psnr(t.cpu().numpy(), o.cpu().numpy())
                    for t, o in zip(target, output)
                ]))

    return np.mean(losses), np.mean(psnr_tot), time.time() - start_epoch
Example #2
0
def train_epoch(args, epoch, model, data_loader, optimizer):
    """Train `model` for one epoch over `data_loader`.

    Args:
        args: dict-like config; reads 'rate', 'device', 'report_interval'
            and 'num_epochs'.
        epoch: current epoch index (used for logging only).
        model: network called as model(input, mask).
        data_loader: iterable yielding (target, _) batches.
        optimizer: torch optimizer over model's parameters.

    Returns:
        Tuple of (mean training loss over the epoch, elapsed seconds).
    """
    model.train()
    avg_loss = 0.
    start_epoch = start_iter = time.time()
    # `batch_idx` / `masked_input` avoid shadowing builtins `iter`/`input`.
    for batch_idx, data in enumerate(data_loader):
        target, _ = data

        masked_input, mask = apply_random_mask(target, args['rate'])
        target = torch.tensor(target).to(args['device'])
        masked_input = torch.tensor(masked_input).to(args['device'])
        mask = torch.tensor(mask).to(args['device'])
        output = model(masked_input, mask)

        loss = F.mse_loss(output, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # BUG FIX: avg_loss was returned but never updated (always 0.0).
        # Maintain a running mean of the per-iteration loss instead.
        avg_loss += (loss.item() - avg_loss) / (batch_idx + 1)
        if batch_idx % args['report_interval'] == 0:
            print('Epoch = [{:3d}/{:3d}] '.format(epoch, args['num_epochs']) +
                  'Iter = [{:4d}/{:4d}] '.format(batch_idx, len(data_loader)) +
                  'Loss = {:.4g} '.format(loss.item()) +
                  'Time = {:.4f}s'.format(time.time() - start_iter))
        start_iter = time.time()
    return avg_loss, time.time() - start_epoch
Example #3
0
 def get_new_examples(self, tokenizer, idxs, method="random", random_mask=None):
     """Select examples by index and produce transformed counterparts.

     Args:
         tokenizer: tokenizer passed through to the transformation helpers.
         idxs: indices into self.examples selecting the working batch.
         method: transformation strategy; "random_mask" applies token
             masking to the whole batch, anything else is delegated
             per-example to self.get_new_example.
         random_mask: masking threshold forwarded when method=="random_mask".

     Returns:
         (old_batch, new_batch) — for "random_mask" the full selected batch
         and its masked version; otherwise only the pairs where the
         transformation actually changed the example.
     """
     batch = [self.examples[i] for i in idxs]
     if method == "random_mask":
         new_batch = apply_random_mask(batch, tokenizer, threshold=random_mask)
         return batch, new_batch
     old_batch = []
     new_batch = []
     # BUG FIX: the loop iterated over the undefined name `examples`
     # (NameError at runtime); the selected `batch` is the intended iterable.
     for old_ex in batch:
         new_ex = self.get_new_example(old_ex, method)
         if new_ex != old_ex:
             old_batch.append(old_ex)
             new_batch.append(new_ex)
     return old_batch, new_batch
Example #4
0
def distant_train_examples(tokenizer,
                           lm='roberta',
                           source=None,
                           ext='',
                           num_examples=None,
                           mask=False,
                           random_mask=False,
                           mask_events=False):
    """Load pickled distant-supervision examples, filter, and featurize them.

    Args:
        tokenizer: tokenizer used for masking and feature conversion.
        lm: language-model family name forwarded to make_tensor_dataset.
        source: news source filter; "even" draws evenly across sources.
        ext: file-extension suffix forwarded when topping up examples.
        num_examples: cap on the number of examples; extra parsed examples
            are loaded if the pickle has fewer than requested.
        mask: if truthy, converted to the 'distant' masking scheme.
        random_mask: if True, apply random token masking to the examples.
        mask_events: forwarded to convert_examples_to_features.

    Returns:
        (examples, tensor dataset) ready for training.
    """
    # Use a context manager so the pickle handle is always closed
    # (the original leaked the open file).
    with open('timex/orig/train_exs.pkl', 'rb') as f:
        exs = pickle.load(f)
    if source == "even":
        # NOTE(review): true division yields a float; confirm
        # filter_distant_source tolerates a non-integer num_examples.
        exs = filter_distant_source(exs,
                                    num_examples=num_examples / 6,
                                    source="afp")
    else:
        # BUG FIX: `source` was passed positionally and — per the keyword
        # usage in the branch above — bound to the num_examples parameter;
        # pass it by keyword.
        exs = filter_distant_source(exs, source=source)

    if num_examples:
        # Top up from parsed examples if the pickle has fewer than requested,
        # then truncate to the exact count.
        if num_examples > len(exs):
            more_examples = _distant_parsed_examples(
                tokenizer,
                source=source,
                ext='',
                num_examples=num_examples - len(exs))
            exs += more_examples
        exs = exs[:num_examples]

    if random_mask:
        exs = apply_random_mask(exs, tokenizer)
    if mask:
        # Boolean flag is repurposed as the masking-scheme name downstream.
        mask = 'distant'
    exs, feats = convert_examples_to_features(examples=exs,
                                              tokenizer=tokenizer,
                                              max_seq_length=MAX_SEQ_LENGTH,
                                              doc_stride=DOC_STRIDE,
                                              mask=mask,
                                              mask_events=mask_events)
    data = make_tensor_dataset(feats, model=lm)
    return exs, data
Example #5
0
def distant_parsed_examples(tokenizer,
                            lm='roberta',
                            ext='',
                            num_examples=None,
                            mask=False,
                            random_mask=False,
                            mask_events=False):
    """Build parsed distant-supervision examples and a tensor dataset.

    Args:
        tokenizer: tokenizer used for masking and feature conversion.
        lm: language-model family name forwarded to make_tensor_dataset.
        ext: file-extension suffix forwarded to _distant_parsed_examples.
        num_examples: optional cap on how many examples to load.
        mask: if truthy, converted to the 'distant' masking scheme.
        random_mask: if True, apply random token masking to the examples.
        mask_events: forwarded to convert_examples_to_features.

    Returns:
        (examples, tensor dataset).
    """
    examples = _distant_parsed_examples(tokenizer,
                                        ext=ext,
                                        num_examples=num_examples)
    if random_mask:
        examples = apply_random_mask(examples, tokenizer)
    if mask:
        # Boolean flag is repurposed as the masking-scheme name downstream.
        mask = 'distant'
    print(len(examples), mask)
    examples, features = convert_examples_to_features(
        examples=examples,
        tokenizer=tokenizer,
        max_seq_length=MAX_SEQ_LENGTH,
        doc_stride=DOC_STRIDE,
        mask=mask,
        mask_events=mask_events)
    return examples, make_tensor_dataset(features, model=lm)
Example #6
0
        # (partial view: this dict literal is opened above the visible chunk)
        'F*': None,  # NOTE(review): presumably the optimal objective value, unknown here — confirm
        'x0': np.zeros((shape[0] * shape[1], 1)),  # initial iterate: zero image, column-vectorized
        'restart_criterion': True,
        'stopping_criterion': 'rerr',  # stop on relative error
        'iter_print': 50,  # progress print frequency (iterations)
        'shape': shape,
        'restart_param': 50,
        'verbose': True,
        'm': shape[0],
        'rate': 0.4,  # fraction of pixels removed by the random mask
        'N': shape[0] * shape[1]  # total number of pixels
    }
    PATH = 'data/gandalf.jpg'
    image = load_image(PATH, params['shape'])

    # Undersample the image; `indices` are the kept pixel positions
    # in Fortran (column-major) flattening order.
    im_us, mask = apply_random_mask(image, params['rate'])
    indices = np.nonzero(mask.flatten(order='F'))[0]
    params['indices'] = indices

    # Wavelet operator
    r = RepresentationOperator(m=params["m"])

    # Define the overall operator
    forward_operator = lambda x: p_omega(r.WT(x), indices)  # P_Omega.W^T
    adjoint_operator = lambda x: r.W(p_omega_t(x, indices, params['m'])
                                     )  # W. P_Omega^T

    # Generate measurements
    b = p_omega(image.reshape(params['N'], 1), indices)

    # Data-fidelity term: half squared residual norm.
    fx = lambda x: 0.5 * np.linalg.norm(b - forward_operator(x))**2