Example #1
    def __call__(self, x):
        h = x
        self.hiddens = []
        for i, layers in enumerate(zip(
            self.linears.values(), self.batch_norms.values())):

            linear, batch_norm = layers

            # Add noise
            if self.noise and not self.bn and not self.lateral and not self.test:
                if np.random.randint(0, 2):
                    n = np.random.normal(0, 0.03, h.data.shape).astype(np.float32)
                    n_ = Variable(to_device(n, self.device))
                    h = h + n_

            # Linear
            h = linear(h)

            # Batchnorm
            if self.bn:
                h = batch_norm(h, self.test)
                if self.noise and not self.test:
                    n = np.random.normal(0, 0.03, h.data.shape).astype(np.float32)
                    n_ = Variable(to_device(n, self.device))
                    h = h + n_
            
            if self.lateral and i != len(self.dims) - 2:
                self.hiddens.append(h)

            # Activation
            h = self.act(h)

        return h
Example #2
 def _add_noise(self, h, test):
     if self.noise and not test:
         if np.random.randint(0, 2):
             n = np.random.normal(0, 0.03, h.data.shape).astype(np.float32)
             n_ = Variable(to_device(n, self.device))
             h = h + n_
     return h
Example #3
 def generate_onehot(self, bs, y_l=None):
     y = np.zeros((bs, self.n_cls))
     if y_l is not None:
         y[np.arange(bs), y_l] = 1.0
     y = y.astype(np.float32)
     y = to_device(y, self.device)
     return y
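Note: every snippet on this page assumes a project-specific to_device helper, and the call conventions differ between projects: the Chainer examples pass (array, device_id), several PyTorch examples pass (device, *tensors), and others move whole nested batches or modules. The following is a minimal, hypothetical sketch of the nested-batch PyTorch variant, not any of the original implementations:

import torch

def to_device(batch, device):
    # Recursively move tensors (and containers of tensors) onto the device.
    if torch.is_tensor(batch):
        return batch.to(device)
    if isinstance(batch, dict):
        return {k: to_device(v, device) for k, v in batch.items()}
    if isinstance(batch, (list, tuple)):
        return type(batch)(to_device(v, device) for v in batch)
    return batch  # non-tensor leaves (ints, strings) pass through unchanged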
Example #4
    def __call__(self, x, test):
        h = x
        self.hiddens = []
        for i, layers in enumerate(zip(
                self.linears.values(), self.batch_norms.values(), self.scale_biases.values())):

            linear, batch_norm, scale_bias = layers

            # Add noise
            if self.noise and not test:
                if np.random.randint(0, 2):
                    n = np.random.normal(0, 0.03, h.data.shape).astype(np.float32)
                    n_ = Variable(to_device(n, self.device))
                    h = h + n_

            # Linear
            h = linear(h)

            # Batchnorm
            h = batch_norm(h, test)

            # Scale bias
            h = scale_bias(h)            

            # Activation  #TODO: have to add in the last layer?
            h = self.act(h)

            # RC after non-linearity
            if self.rc and i != len(self.dims) - 2:
                self.hiddens.append(h)

        return h
Example #5
 def generate_onehotmap(self, bs, sd, y_l=None):
     y = np.zeros((bs, self.n_cls, sd, sd))
     if y_l is not None:
         for i in range(len(y)):
             y[i, y_l[i], :, :] = 1.
     y = y.astype(np.float32)
     y = to_device(y, self.device)
     return y
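The per-sample loop in generate_onehotmap can be replaced by a single fancy-indexing assignment; a hypothetical vectorized equivalent (generate_onehotmap_vec is illustrative, not part of the original code):

import numpy as np

def generate_onehotmap_vec(bs, n_cls, sd, y_l=None):
    # Same output as the loop above: a (bs, n_cls, sd, sd) one-hot map.
    y = np.zeros((bs, n_cls, sd, sd), dtype=np.float32)
    if y_l is not None:
        y[np.arange(bs), y_l] = 1.0  # 1.0 broadcasts over each (sd, sd) plane
    return y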
Example #6
def stats(dataset, model):
    with torch.no_grad():
        test_model = eval('models.{}(model_rate=cfg["global_model_rate"], track=True).to(cfg["device"])'
                          .format(cfg['model_name']))
        test_model.load_state_dict(model.state_dict(), strict=False)
        data_loader = make_data_loader({'train': dataset})['train']
        test_model.train(True)
        for i, input in enumerate(data_loader):
            input = collate(input)
            input = to_device(input, cfg['device'])
            test_model(input)
    return test_model
Example #7
    def zero_state(self, batch_size):
        # The axes semantics are (num_layers, batch_size, hidden_dim)
        nb_layers = self.num_layers * 2 if self.bidirectional else self.num_layers
        state_shape = (nb_layers, batch_size, self.hidden_size)

        # shape: (num_layers, batch_size, hidden_dim)
        h = to_device(torch.zeros(*state_shape))

        # shape: (num_layers, batch_size, hidden_dim)
        c = torch.zeros_like(h)

        return h, c
Example #8
def test(data_loader, model):
    with torch.no_grad():
        metric = Metric()
        model.train(False)
        for i, input in enumerate(data_loader):
            input = collate(input)
            input = to_device(input, config.PARAM['device'])
            output = model(input)
            output['loss'] = output['loss'].mean() if config.PARAM['world_size'] > 1 else output['loss']
        evaluation = metric.evaluate(config.PARAM['metric_names']['test'], input, output)  # note: evaluates the final batch only
    print(evaluation)
    return evaluation
Example #9
def estimate_advantages(rewards, masks, values, gamma, tau, device):
    rewards, masks, values = to_device(torch.device('cpu'), rewards, masks,
                                       values)
    tensor_type = type(rewards)
    deltas = tensor_type(rewards.size(0), 1)
    advantages = tensor_type(rewards.size(0), 1)

    prev_value = 0
    prev_advantage = 0
    for i in reversed(range(rewards.size(0))):
        deltas[i] = rewards[i] + gamma * prev_value * masks[i] - values[i]
        advantages[i] = deltas[i] + gamma * tau * prev_advantage * masks[i]

        prev_value = values[i, 0]
        prev_advantage = advantages[i, 0]

    returns = values + advantages
    advantages = (advantages - advantages.mean()) / advantages.std()

    advantages, returns = to_device(device, advantages, returns)
    return advantages, returns
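Example #9 is the standard generalized advantage estimation (GAE) recursion: deltas[i] = r_i + gamma * V_{i+1} * mask_i - V_i is the TD error, and advantages[i] = deltas[i] + gamma * tau * mask_i * advantages[i+1] accumulates it backwards, with masks zeroing the bootstrap at episode boundaries. A hypothetical call with dummy rollout tensors (the values are random stand-ins):

import torch

rewards = torch.tensor([[1.0], [0.0], [1.0], [0.0], [1.0]])
masks = torch.tensor([[1.0], [1.0], [0.0], [1.0], [0.0]])  # 0 marks an episode end
values = torch.randn(5, 1)

advantages, returns = estimate_advantages(
    rewards, masks, values, gamma=0.99, tau=0.95, device=torch.device('cpu'))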
Example #10
def eval_error(total_step):
    cluster.eval_models()

    batch = next(eval_samples)
    batch['t'] = batch['t'].view(-1, 1)
    batch['v0_array'] = batch['v0_array'].view(-1, 1)
    batch['xy'] = batch['xy'].view(-1, 2)
    to_device(batch, device)

    feature = cluster.get_encoder(batch['img'], 0)
    feature_dim = feature.shape[-1]
    feature = feature.unsqueeze(1)
    feature = feature.expand(1, batch['t'].shape[0], feature_dim)
    feature = feature.reshape(1 * batch['t'].shape[0], feature_dim)

    output = cluster.get_trajectory(feature, batch['v0_array'], batch['t'], 0)

    output_xy = output[:, :2]
    logvar = output[:, 2:]

    real_traj = batch['xy'].data.cpu().numpy() * opt.max_dist
    fake_traj = output_xy.data.cpu().numpy() * opt.max_dist

    real_x = real_traj[:, 0]
    real_y = real_traj[:, 1]
    fake_x = fake_traj[:, 0]
    fake_y = fake_traj[:, 1]

    ex = np.mean(np.abs(fake_x - real_x))
    ey = np.mean(np.abs(fake_y - real_y))
    fde = np.hypot(fake_x - real_x, fake_y - real_y)[-1]
    ade = np.mean(np.hypot(fake_x - real_x, fake_y - real_y))

    logger = logger_cluster[0]
    logger.add_scalar('eval/ex', ex.item(), total_step)
    logger.add_scalar('eval/ey', ey.item(), total_step)
    logger.add_scalar('eval/fde', fde.item(), total_step)
    logger.add_scalar('eval/ade', ade.item(), total_step)

    cluster.train_models()
Example #11
def test(dataset, data_split, label_split, model, logger, epoch):
    with torch.no_grad():
        metric = Metric()
        model.train(False)
        for m in range(cfg['num_users']):
            data_loader = make_data_loader(
                {'test': SplitDataset(dataset, data_split[m])})['test']
            for i, input in enumerate(data_loader):
                input = collate(input)
                input_size = input['img'].size(0)
                input['label_split'] = torch.tensor(label_split[m])
                input = to_device(input, cfg['device'])
                output = model(input)
                output['loss'] = output['loss'].mean() if cfg['world_size'] > 1 else output['loss']
                evaluation = metric.evaluate(
                    cfg['metric_name']['test']['Local'], input, output)
                logger.append(evaluation, 'test', input_size)
        data_loader = make_data_loader({'test': dataset})['test']
        for i, input in enumerate(data_loader):
            input = collate(input)
            input_size = input['img'].size(0)
            input = to_device(input, cfg['device'])
            output = model(input)
            output['loss'] = output['loss'].mean() if cfg['world_size'] > 1 else output['loss']
            evaluation = metric.evaluate(cfg['metric_name']['test']['Global'],
                                         input, output)
            logger.append(evaluation, 'test', input_size)
        info = {
            'info': [
                'Model: {}'.format(cfg['model_tag']),
                'Test Epoch: {}({:.0f}%)'.format(epoch, 100.)
            ]
        }
        logger.append(info, 'test', mean=False)
        logger.write(
            'test', cfg['metric_name']['test']['Local'] +
            cfg['metric_name']['test']['Global'])
    return
Example #12
    def test(self, epoch, save=False):

        c_time = time.time()

        self.net0.eval()

        running_val_losses = np.zeros(4)

        N = self.val_n_img

        disparities_L = np.zeros((N, self.input_height, self.input_width), dtype=np.float32)
        images_L = np.zeros((N, 3, self.input_height, self.input_width), dtype=np.float32)
        gt_L = np.zeros((N, 375, 1242), dtype=np.float32)

        with torch.no_grad():
            for i, data in enumerate(self.val_loader):              
                data = to_device(data, self.device)
                out = self.compute(data)
                losses = self.criterion(out, epoch, self.loss_weights)
                loss = sum(losses)
                running_val_losses += np.array([l.item() for l in losses])

                left = out[0][0][0]
                right = out[0][1][0]
                disp_est_left = out[2][0][0]
                gt = out[3][0][:, 0, :, :]
                DR = disp_est_left.cpu().numpy()[:, 0, :, :]

                ndata, _, _ = DR.shape
                start = i*self.batch_size
                end = i*self.batch_size + ndata

                disparities_L[start:end] = DR
                gt_L[start:end] = gt.cpu().numpy()
                images_L[start:end] = left.cpu().numpy()

            running_val_losses /= self.val_n_img / self.batch_size
            running_val_loss = sum(running_val_losses)

            model_saved = '[*]'
            if save and running_val_loss < self.best_val_loss:
                self.save(True)
                self.best_val_loss = running_val_loss
                model_saved = '[S]'
            print(
                '      Test',
                'G: {} {:.3f}({:.3f})'.format(running_val_losses, running_val_loss, self.best_val_loss),
                'Time: {:.2f}s'.format(time.time() - c_time),
                model_saved
            )
        
        return disparities_L, images_L, gt_L
Example #13
    def policy_loop(self, data):
        s, target_a, target_d = to_device(data)
        a_weights = self.policy(s)
        loss_a = self.multi_entropy_loss(a_weights, target_a)
        
        if self.train_direc == 'opposite':
            target_d = torch.ones_like(target_d) - target_d
        d_weights = self.policy.forward_domain(s)
        loss_d = self.multi_entropy_loss(d_weights, target_d)
        if self.train_direc == 'opposite':
            loss_d = -loss_d

        return loss_a + loss_d * self.mt_factor
Example #14
    def update_function(engine, batch):
        model.train()
        optimizer.zero_grad()

        inputs, targets = to_device(batch)

        logits = model(inputs)
        loss = criterion(logits, targets)
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model_parameters, cfg.max_grad_norm)
        optimizer.step()

        return loss.item()
Example #15
    def user_loop(self, data):
        batch_input = to_device(padding_data(data))
        a_weights, t_weights, argu = self.user(
            batch_input['goals'], batch_input['goals_length'],
            batch_input['posts'], batch_input['posts_length'],
            batch_input['origin_responses'])

        loss_a = 0
        targets_a = batch_input['origin_responses'][:, 1:]  # remove sos_id
        for i, a_weight in enumerate(a_weights):
            loss_a += self.nll_loss(a_weight, targets_a[:, i])
        loss_a /= len(a_weights)  # average over decoding steps
        loss_t = self.bce_loss(t_weights, batch_input['terminal'])
        loss_a += self.cfg.alpha * kl_gaussian(argu)
        return loss_a, loss_t
Example #16
    def seg_inference(self):
        self.model.eval()
        segmentations = np.zeros((self.n_img, 384, 384), dtype=np.float32)
        with torch.no_grad():
            for idx, data in enumerate(self.loader):
                data = to_device(data, self.device)
                left = data['image']
                # (commented-out variants for the 'depth' and 'seg' tasks,
                # which assembled a target_list from left/right labels and
                # indices, are condensed here)
                collection = self.model([left], self.args.task)

                # loss = self.loss_function(collection[0], data,
                #                           self.args.task, input_right=collection[1])
                prediction = nn.Softmax2d()(
                    collection[3].unsqueeze(0)).squeeze().permute(
                        1, 2, 0).detach().cpu().numpy()
                target = np.zeros((384, 384))
                for i in range(prediction.shape[0]):
                    for j in range(prediction.shape[1]):
                        anatomy = np.argmax(prediction[i][j])
                        if anatomy == 1:
                            target[i][j] = 80
                        elif anatomy == 2:
                            target[i][j] = 100
                        elif anatomy == 3:
                            target[i][j] = 140
                        elif anatomy == 4:
                            target[i][j] = 220
                # plt.imshow(target,cmap='gray')
                # plt.show()
                segmentations[idx] = target
                # print(prediction[0].shape)
                # exit(0)

                # right_prediction = collection[1].detach().cpu().numpy()
            np.save(self.output_directory + '/segmentation.npy', segmentations)
            print('Finished Testing')
Example #17
def estimate_advantages(repeats, rewards, masks, values, gamma, tau, device):
    repeats, rewards, masks, values = to_device(torch.device('cpu'), repeats,
                                                rewards, masks, values)
    tensor_type = type(rewards)
    deltas = tensor_type(rewards.size(0), 1)
    advantages = tensor_type(rewards.size(0), 1)

    prev_value = 0
    prev_advantage = 0
    for i in reversed(range(rewards.size(0))):
        # modified TD-error calculation for Learn to Repeat n times.
        gamma_n = gamma**repeats[i]
        deltas[i] = rewards[i] + gamma_n * prev_value * masks[i] - values[i]
        advantages[i] = deltas[i] + gamma_n * tau * prev_advantage * masks[i]

        prev_value = gamma_n * values[i, 0]
        prev_advantage = gamma_n * advantages[i, 0]

    returns = values + advantages
    advantages = (advantages - advantages.mean()) / advantages.std()

    advantages, returns = to_device(device, advantages, returns)
    return advantages, returns
Example #18
    def build_corpus(self, dataset, config):
        loader = DataLoader(dataset, **config.loader.to_dict())
        corpus = []
        with torch.no_grad():
            for batch_id, data in enumerate(prolog(loader, 'build corpus')):
                data = to_device(data)
                codes = self.get_code(data['uttr'], cands=True)

                for code, text_ids in zip(codes, data['uttr']['text_ids']):
                    corpus.append((text_ids, code))

        text_ids, codes = zip(*corpus)
        codes = torch.stack(codes, dim=0)
        return text_ids, codes
Example #19
def train_one_epoch(model,
                    train_dl,
                    optimizer,
                    loss_fn,
                    device,
                    metric_fns=None):
    """
    Returns the epoch's average loss (i.e. per-sample loss) and other metrics.
    """
    to_device(model, device)
    model.train()
    result = {}

    # Running total of the loss over this epoch; averaged per sample at the end
    epoch_loss = 0.
    sample_count = 0
    for sample in train_dl:
        x, y = sample['X'].to(device), sample['y'].to(device)
        y_pred = model(x)
        loss_mean = loss_fn(y_pred, y)

        # Backprop
        optimizer.zero_grad()
        loss_mean.backward()
        optimizer.step()

        epoch_loss += loss_mean.item() * len(x)
        sample_count += len(x)

    # Collect epoch stats
    epoch_loss /= sample_count
    result['epoch_loss'] = epoch_loss
    ## add other metrics
    # for mname, metric_fn in metric_fns.items():
    # ...

    return result
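A minimal, hypothetical driver for train_one_epoch (the model, data, and optimizer are stand-ins; the loader must yield dicts with 'X' and 'y' keys, and to_device is the project helper assumed above):

import torch
import torch.nn as nn
from torch.utils.data import DataLoader

# Stand-in dataset: 64 samples, 10 features, 3 classes.
samples = [{'X': torch.randn(10), 'y': torch.randint(0, 3, ()).long()}
           for _ in range(64)]
train_dl = DataLoader(samples, batch_size=16)

model = nn.Linear(10, 3)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
result = train_one_epoch(model, train_dl, optimizer,
                         nn.CrossEntropyLoss(), torch.device('cpu'))
print(result['epoch_loss'])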
Example #20
def InceptionScore(img, splits=1):
    with torch.no_grad():
        N = len(img)
        batch_size = 32
        data_loader = DataLoader(img, batch_size=batch_size)
        if cfg['data_name'] in ['COIL100', 'Omniglot']:
            model = models.classifier().to(cfg['device'])
            model_tag = ['0', cfg['data_name'], cfg['subset'], 'classifier']
            model_tag = '_'.join(filter(None, model_tag))
            checkpoint = load(
                './metrics_tf/res/classifier/{}_best.pt'.format(model_tag))
            model.load_state_dict(checkpoint['model_dict'])
            model.train(False)
            pred = torch.zeros((N, cfg['classes_size']))
            for i, input in enumerate(data_loader):
                input = {
                    'img': input,
                    'label': input.new_zeros(input.size(0)).long()
                }
                input = to_device(input, cfg['device'])
                input_size_i = input['img'].size(0)
                output = model(input)
                pred[i * batch_size:i * batch_size + input_size_i] = F.softmax(
                    output['label'], dim=-1).cpu()
        else:
            model = inception_v3(pretrained=True,
                                 transform_input=False).to(cfg['device'])
            model.train(False)
            up = nn.Upsample(size=(299, 299),
                             mode='bilinear',
                             align_corners=False)
            pred = torch.zeros((N, 1000))
            for i, input in enumerate(data_loader):
                input = input.to(cfg['device'])
                input_size_i = input.size(0)
                input = up(input)
                output = model(input)
                pred[i * batch_size:i * batch_size + input_size_i] = F.softmax(
                    output, dim=-1).cpu()
        split_scores = []
        for k in range(splits):
            part = pred[k * (N // splits):(k + 1) * (N // splits), :]
            py = torch.mean(part, dim=0)
            scores = F.kl_div(py.log().view(1, -1).expand_as(part),
                              part,
                              reduction='batchmean').exp()
            split_scores.append(scores)
        inception_score = np.mean(split_scores).item()
    return inception_score
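The split loop above computes the usual Inception Score, IS = exp(mean_x KL(p(y|x) || p(y))). A small NumPy restatement of the same formula for intuition (inception_score_np is illustrative, not part of the original code; pred is any (N, C) row-stochastic matrix of class probabilities):

import numpy as np

def inception_score_np(pred, splits=1, eps=1e-16):
    N = pred.shape[0]
    scores = []
    for k in range(splits):
        part = pred[k * (N // splits):(k + 1) * (N // splits)]
        py = part.mean(axis=0, keepdims=True)  # marginal p(y) over the split
        kl = (part * (np.log(part + eps) - np.log(py + eps))).sum(axis=1)
        scores.append(np.exp(kl.mean()))
    return float(np.mean(scores))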
Example #21
def test_GAN():
    kitti_points_num = 10
    encoder.eval()

    batch = next(eval_samples)
    batch['t'] = batch['t'].view(-1,1)
    batch['v0_array'] = batch['v0_array'].view(-1,1)
    batch['xy'] = batch['xy'].view(-1,2)
    to_device(batch, device)

    real_traj = torch.zeros_like(batch['xy']).data.cpu().numpy()*opt.max_dist

    real_condition = batch['v_0']
    condition = real_condition.unsqueeze(1).expand(1, kitti_points_num, 1).reshape(1*kitti_points_num, 1)

    

    for total_step in range(1000):
        condition = torch.randn_like(condition).to(device)
        latent = torch.randn(1 * kitti_points_num, opt.vector_dim).to(device)

        output_xy = generator(condition, latent, batch['t'])
        fake_traj = output_xy.data.cpu().numpy()*opt.max_dist
        show_traj(fake_traj, real_traj, batch['t'].view(1, -1).data.cpu().numpy()[0], total_step)
Example #22
    def __call__(self, y):
        bs = y.data.shape[0]
        d = np.prod(y.data.shape[1:])

        y_normalized = F.softmax(y)
        y_log_softmax = F.log_softmax(y)
        negentropy = F.sum(y_normalized * y_log_softmax, axis=1) / d

        #zeros = to_device(np.zeros(bs).astype(np.float32), 2)
        ones = to_device(-np.ones(bs).astype(np.float32), 2)
        self.loss = F.sum(F.maximum(Variable(ones), -negentropy)) / bs
        
        return self.loss
Example #23
def estimate_advantages(rewards,
                        masks,
                        values,
                        gamma,
                        tau,
                        device,
                        terminal_ns_values=None):
    rewards, masks, values = to_device(torch.device('cpu'), rewards, masks,
                                       values)
    if terminal_ns_values is not None:
        terminal_ns_values = to_device(torch.device('cpu'), terminal_ns_values)
        current_tns_value_i = -1  # start from last
    tensor_type = type(rewards)
    deltas = tensor_type(rewards.size(0), 1)
    advantages = tensor_type(rewards.size(0), 1)

    prev_value = 0
    prev_advantage = 0

    for i in reversed(range(rewards.size(0))):
        if terminal_ns_values is not None and masks[i] == 0:
            # terminal step: bootstrap from the recorded next-state value
            deltas[i] = rewards[i] + gamma * terminal_ns_values[
                current_tns_value_i] - values[i]
            current_tns_value_i -= 1
        else:
            deltas[i] = rewards[i] + gamma * prev_value * masks[i] - values[i]
        advantages[i] = deltas[i] + gamma * tau * prev_advantage * masks[i]

        prev_value = values[i, 0]
        prev_advantage = advantages[i, 0]

    returns = values + advantages
    advantages = (advantages - advantages.mean()) / advantages.std()
    advantages, returns = to_device(device, advantages, returns)
    return advantages, returns
Example #24
    def __init__(self,
                 model: nn.Module,
                 dataloaders: Dict[str, DataLoader],
                 device: torch.device,
                 tbw=None,
                 global_epoch=0):
        self.global_epoch = global_epoch  # running count of training epochs, kept across sessions
        self.model = model
        to_device(model, device)
        self.device = device
        self.train_dl = dataloaders.get('train')
        self.val_dl = dataloaders.get('val', None)
        self.test_dl = dataloaders.get('test', None)

        # easy access to datasets
        self.train_ds = self.train_dl.dataset
        self.val_ds, self.test_ds = None, None
        if self.val_dl is not None:
            self.val_ds = self.val_dl.dataset
        if self.test_dl is not None:
            self.test_ds = self.test_dl.dataset

        # tensorboard writer wrapper
        self.tbw = tbw
Example #25
def test(dataset, model, logger, epoch):
    with torch.no_grad():
        metric = Metric()
        model.train(False)
        batch_dataset = BatchDataset(dataset, cfg['bptt'])
        for i, input in enumerate(batch_dataset):
            input_size = input['label'].size(0)
            input = to_device(input, cfg['device'])
            output = model(input)
            output['loss'] = output['loss'].mean() if cfg['world_size'] > 1 else output['loss']
            evaluation = metric.evaluate(cfg['metric_name']['test'], input, output)
            logger.append(evaluation, 'test', input_size)
        info = {'info': ['Model: {}'.format(cfg['model_tag']), 'Test Epoch: {}({:.0f}%)'.format(epoch, 100.)]}
        logger.append(info, 'test', mean=False)
        logger.write('test', cfg['metric_name']['test'])
    return
Example #26
    def __call__(
        self,
        y,
    ):
        bs = y.data.shape[0]
        d = np.prod(y.data.shape[1:])

        y_normalized = F.softmax(y)
        y_log_softmax = F.log_softmax(y)
        negentropy = F.sum(y_normalized * y_log_softmax, axis=1) / d

        #zeros = to_device(np.zeros(bs).astype(np.float32), 2)
        ones = to_device(-np.ones(bs).astype(np.float32), 2)
        self.loss = F.sum(F.maximum(Variable(ones), -negentropy)) / bs

        return self.loss
Example #27
def create_model(config, dataset, create_W_emb=False):
    model_class = None
    model_params = {}

    if config.model == 'counts':
        model_class = CountsModel
        model_params.update(
            dict(
                input_size=dataset.nb_features,
                output_size=dataset.nb_classes,
                dropout=config.dropout,
            ))

    elif config.model == 'han' or config.model == 'rnn':
        if create_W_emb:
            word_embeddings = load_pickle(EMBEDDINGS_FILENAME)
            print(f'Embeddings: {len(word_embeddings)}')

            W_emb = create_embeddings(word_embeddings, dataset.vocab)
        else:
            W_emb = None

        model_class = RNNModel
        model_params.update(
            dict(vocab_size=dataset.vocab_size,
                 trainable_embeddings=config.trainable_embeddings,
                 hidden_size=config.hidden_size,
                 dropout=config.dropout,
                 output_size=dataset.nb_classes,
                 W_emb=W_emb,
                 embedding_size=300,
                 padding_idx=0))

        if config.model == 'han':
            model_class = HANModel
            model_params.update(dict(attention_size=config.attention_size, ))

    else:
        raise ValueError(f'Model {config.model} is unknown')

    model = model_class(**model_params)
    init_weights(model)
    model = to_device(model)

    print(f'Model: {model.__class__.__name__}')

    return model
Example #28
    def test(self):
        
        i = 0
        sum_loss = 0.0
        sum_ssim = 0.0
        average_ssim = 0.0
        average_loss = 0.0
        for epoch in range(self.args.epochs):


            self.model.eval()  # evaluation mode

            for data in self.loader:  # iterate over the test set
                i += 1


                data = to_device(data, self.device)

                left = data['left_image']
                bg_image = data['bg_image']

                disps = self.model(left)

                print(disps.shape)

                l_loss = l1loss(disps, bg_image)
                ssim_loss = ssim(disps, bg_image)
                psnr222 = psnr1(disps, bg_image)


                sum_loss = sum_loss + l_loss.item()
                sum_ssim += ssim_loss.item()

                average_ssim = sum_ssim / i
                average_loss = sum_loss / i

                # print average_loss
    
                disp_show = disps.squeeze()
                bg_show = bg_image.squeeze()
                print(bg_show.shape)
                plt.figure()
                plt.subplot(1,2,1)
                plt.imshow(disp_show.data.cpu().numpy())
                plt.subplot(1,2,2)
                plt.imshow(bg_show.data.cpu().numpy())
                plt.show() 
Example #29
def train_gan(model, train_loader, optimizer_D, optimizer_G, args):
    # Initialization
    Tensor = torch.cuda.FloatTensor if args.device == "cuda" else torch.FloatTensor
    model.to(args.device)

    for epoch in range(args.epochs):

        for idx, batch in enumerate(train_loader):

            batch = to_device(batch, args.device)
            batch_size = batch.shape[0]
            # Train discriminator
            optimizer_D.zero_grad()
            # Noise
            z = Tensor(np.random.normal(0,
                                        1,
                                        size=(batch_size, args.random_dim)),
                       device=args.device)
            # Generate synthetic examples
            batch_synthetic = model.G(
                z).detach()  # No gradient for generator's parameters
            # Discriminator outputs
            y_real = model.D(batch)
            y_synthetic = model.D(batch_synthetic)
            # Gradient penalty
            gradient_penalty = model.D.compute_gradient_penalty(
                batch.data, batch_synthetic.data)
            # Loss & Update
            loss_D = model.D.loss(y_real, y_synthetic, gradient_penalty)
            loss_D.backward()
            optimizer_D.step()

            # Train generator every n_critic iterations
            if idx % args.n_critic == 0:
                # The loss function at this point is an approximate of EM distance
                model.logs["approx. EM distance"].append(loss_D.item())
                optimizer_G.zero_grad()
                # Generate synthetic examples
                batch_synthetic = model.G(z)
                # Loss & Update
                loss_G = model.G.loss(model.D(batch_synthetic))
                loss_G.backward()
                optimizer_G.step()
                if args.verbose and idx % 100 == 0:
                    print(
                        f"Epoch {epoch}, Iteration {idx}, Appriximation of EM distance: {loss_D.item()}"
                    )
Example #30
    def testOneEpoch(self, epoch=None):
        """Run test epoch.

        Parameters
        ----------
        epoch: current epoch. epoch = -1 means the test is
            done after net initialization.
        """
        if epoch is None:
            epoch = self.last_epoch
        # test only use pair outfits
        self.net.eval()
        phase = "test"
        latest_time = time()
        tracer = utils.tracer.Tracer(win_size=0)
        loader = self.loader[phase].make_nega()
        num_batch = loader.num_batch
        msg = "Epoch[{}]:Test [%d]/[{}]".format(epoch, num_batch)
        self.net.rank_metric.reset()
        for idx, inputs in enumerate(loader):
            # compute output and loss
            inputv = utils.to_device(inputs, self.device)
            uidx = inputs[-1].view(-1).tolist()
            batch_size = len(uidx)
            data_time = time() - latest_time
            with torch.no_grad():
                loss_, accuracy_ = self.step_batch(inputv)
                loss = self.gather_loss(loss_, backward=False)
                accuracy = self.gather_accuracy(accuracy_)
            # update time and history
            batch_time = time() - latest_time
            latest_time = time()
            data = dict(data_time=data_time, batch_time=batch_time)
            data.update(loss)
            data.update(accuracy)
            tracer.update_history(self.iter, data, weight=batch_size)
            LOGGER.info(msg, idx)
            tracer.logging()
        # compute average results
        rank_results = self.net.rank_metric.rank()
        results = {k: v.avg for k, v in tracer.get_history().items()}
        results.update(rank_results)
        self.tracer.update(phase, self.iter, results)
        LOGGER.info("Epoch[%d] Average:", epoch)
        self.tracer.logging(phase)
        return results
Example #31
 def __call__(self, h):
     if len(h.shape) != 4:
         return 0
     
     # (b, c, h, w) -> (b, h, w, c) -> (b, h*w, c)
     h = F.transpose(h, (0, 2, 3, 1))
     shape = h.shape
     b, n, c = shape[0], shape[1] * shape[2], shape[3]
     h = F.reshape(h, (b, n, c))
     s = 0
     xp = cuda.get_array_module(h.data)
     I_ = xp.identity(n)
     I_ = Variable(to_device(I_, device))
     for h_ in h:
         s += F.sum(F.square(F.linear(h_, h_) - I_))
     l = s / (b * n * c)
     return l
Example #32
def main(args):
    data = read_data()
    train_data, test_data, deep_columns_idx, embedding_columns_dict = feature_engine(
        data)
    data_wide = train_data[0]
    train_data = (torch.from_numpy(train_data[0].values),
                  torch.from_numpy(train_data[1].values),
                  torch.from_numpy(train_data[2].values))
    train_data = trainset(train_data)
    test_data = (torch.from_numpy(test_data[0].values),
                 torch.from_numpy(test_data[1].values),
                 torch.from_numpy(test_data[2].values))
    test_data = trainset(test_data)
    trainloader = DataLoader(train_data,
                             batch_size=args.batch_size,
                             shuffle=True)
    testloader = DataLoader(test_data,
                            batch_size=args.batch_size,
                            shuffle=False)
    device = to_device()
    # parameters setting
    deep_model_params = {
        'deep_columns_idx': deep_columns_idx,
        'embedding_columns_dict': embedding_columns_dict,
        'hidden_layers': args.hidden_layers,
        'dropouts': args.dropouts,
        'deep_output_dim': args.deep_out_dim
    }
    wide_model_params = {
        'wide_input_dim': data_wide.shape[1],
        'wide_output_dim': args.wide_out_dim
    }
    activation, criterion = set_method(args.method)
    widedeep = WideDeep(wide_model_params, deep_model_params, activation)
    widedeep = widedeep.to(device)
    optimizer = torch.optim.Adam(widedeep.parameters(), lr=args.lr)
    train(widedeep,
          trainloader,
          testloader,
          optimizer,
          criterion,
          device,
          epochs=args.epochs,
          print_step=args.print_step,
          validation=args.validation)
    save_model(widedeep, "wide_deep_model_{}.pkl".format(time.time()))
Example #33
    def __call__(self, h):
        if len(h.shape) != 4:
            return 0

        # (b, c, h, w) -> (b, h, w, c) -> (b, h*w, c)
        h = F.transpose(h, (0, 2, 3, 1))
        shape = h.shape
        b, n, c = shape[0], shape[1] * shape[2], shape[3]
        h = F.reshape(h, (b, n, c))
        s = 0
        xp = cuda.get_array_module(h.data)
        I_ = xp.identity(n)
        I_ = Variable(to_device(I_, device))
        for h_ in h:
            s += F.sum(F.square(F.linear(h_, h_) - I_))
        l = s / (b * n * c)
        return l
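Examples #31 and #33 implement an orthogonality penalty: the flattened spatial features of each sample are pushed toward an orthonormal set by penalizing the squared distance between the Gram matrix h h^T and the identity (F.linear(h_, h_) in Chainer computes h_ @ h_.T). A NumPy restatement for intuition (ortho_penalty_np is illustrative only):

import numpy as np

def ortho_penalty_np(h):
    # h: (b, c, height, width) feature map, as in the Chainer code above.
    b, c, hh, ww = h.shape
    n = hh * ww
    h = h.transpose(0, 2, 3, 1).reshape(b, n, c)  # (b, n, c)
    eye = np.eye(n)
    s = sum(((h_ @ h_.T - eye) ** 2).sum() for h_ in h)
    return s / (b * n * c)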
Example #34
def test(data_loader, model, logger, epoch):
    with torch.no_grad():
        metric = Metric()
        model.train(False)
        for i, input in enumerate(data_loader):
            input = collate(input)
            input_size = len(input['img'])
            input = to_device(input, config.PARAM['device'])
            output = model(input)
            output['loss'] = output['loss'].mean() if config.PARAM['world_size'] > 1 else output['loss']
            evaluation = metric.evaluate(config.PARAM['metric_names']['test'], input, output)
            logger.append(evaluation, 'test', input_size)
        info = {'info': ['Model: {}'.format(config.PARAM['model_tag']),
                         'Test Epoch: {}({:.0f}%)'.format(epoch, 100.)]}
        logger.append(info, 'test', mean=False)
        logger.write('test', config.PARAM['metric_names']['test'])
    return
Example #35
def evalute_accuracy(config):
    """Evaluate fashion net for accuracy."""
    # make data loader
    parallel, device = utils.get_device(config.gpus)
    param = config.data_param
    loader = polyvore.data.get_dataloader(param)
    net = get_net(config)
    net.eval()
    # set data mode to pair for testing pair-wise accuracy
    LOGGER.info("Testing for accuracy")
    loader.set_data_mode("PairWise")
    loader.make_nega()
    accuracy = binary = 0.0
    for idx, inputv in enumerate(loader):
        # compute output and loss
        uidx = inputv[-1].view(-1)
        batch_size = uidx.numel()
        inputv = utils.to_device(inputv, device)
        with torch.no_grad():
            if parallel:
                output = data_parallel(net, inputv, config.gpus)
            else:
                output = net(*inputv)
        _, batch_results = net.gather(output)
        batch_accuracy = batch_results["accuracy"]
        batch_binary = batch_results["binary_accuracy"]
        LOGGER.info(
            "Batch [%d]/[%d] Accuracy %.3f Accuracy (Binary Codes) %.3f",
            idx,
            loader.num_batch,
            batch_accuracy,
            batch_binary,
        )
        accuracy += batch_accuracy * batch_size
        binary += batch_binary * batch_size
    accuracy /= loader.num_sample
    binary /= loader.num_sample
    LOGGER.info("Average accuracy: %.3f, Binary Accuracy: %.3f", accuracy, binary)
    # save results
    if net.param.zero_iterm:
        results = dict(uaccuracy=accuracy, ubinary=binary)
    elif net.param.zero_uterm:
        results = dict(iaccuracy=accuracy, ibinary=binary)
    else:
        results = dict(accuracy=accuracy, binary=binary)
    update_npz(config.result_file, results)
Example #36
 def __init__(self,
              model,
              train_data,
              valid_data,
              token2id,
              lr,
              batch_size,
              epochs,
              patience=10):
     self.model = to_device(model)
     self.train_data = train_data
     self.valid_data = valid_data
     self.token2id = token2id
     self.lr = lr
     self.batch_size = batch_size
     self.epochs = epochs
     self.patience = patience
Example #37
 def outfit_scores():
     """Compute rank scores for data set."""
     num_users = net.param.num_users
     scores = [[] for u in range(num_users)]
     binary = [[] for u in range(num_users)]
     for inputv in tqdm.tqdm(loader, desc="Computing scores"):
         uidx = inputv[-1].view(-1)
         inputv = utils.to_device(inputv, device)
         with torch.no_grad():
             if parallel:
                 output = data_parallel(net, inputv, config.gpus)
             else:
                 output = net(*inputv)
         # save scores for each user
         for n, u in enumerate(uidx):
             scores[u].append(output[0][n].item())
             binary[u].append(output[1][n].item())
     return scores, binary
Example #38
    def __call__(self, h, h_gen=None, test=False):
        # Concat
        if h_gen is not None:
            h = F.concat((h, h_gen))
        else:
            h_zero = Variable(to_device(
                np.zeros(h.shape, dtype=np.float32), self.device))
            h = F.concat((h, h_zero))

        h = self.deconv0(h)
        h = self.bn0(h)
        h = self.act(h)

        h = self.deconv1(h)  # 7x7 -> 14x14
        h = self.bn1(h, test)
        h = self.act(h)

        h = self.deconv2(h)  # 14x14 -> 28x28
        h = F.tanh(h)
        return h
Example #39
 def generate_x_recon(self, bs):
     #TODO: consider diversity, now only r \in {0, 1}
     y = self.generate_random_onehot(bs)
     y = to_device(y, self.device)
     x_recon = self.decoder(y)
     return x_recon
Example #40
 def generate_random(self, bs, dim=30):
     r = np.random.uniform(-1, 1, (bs, dim)).astype(np.float32)
     r = to_device(r, self.device)
     return r