Example #1
File: models.py  Project: sumitsk/algp
    def cov_mat(self,
                x1,
                x2=None,
                white_noise_var=None,
                add_likelihood_var=False):
        # white_noise_var needs to be passed explicitly
        x1_ = to_torch(x1)
        x2_ = to_torch(x2)

        self.model.eval()
        with torch.no_grad():
            x1_ = self.model.latent_func(x1_)
            if x2_ is None or torch.equal(x1_, x2_):
                cov = self.model.kernel_covar_module(
                    x1_).evaluate().cpu().numpy()
            else:
                x2_ = self.model.latent_func(x2_)
                cov = self.model.kernel_covar_module(
                    x1_, x2_).evaluate().cpu().numpy()

            if white_noise_var is not None:
                cov += np.diag(white_noise_var)

            # for training data, add likelihood variance
            if add_likelihood_var:
                cov += self.likelihood.log_noise.exp().item() * np.eye(
                    len(cov))
        return cov
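None of the listings below includes the to_torch helper itself. As a point of reference, here is a minimal sketch of what such a helper typically looks like, inferred only from how it is called in these examples (to_torch(x), to_torch(x, cuda=flag), to_torch(x).long(), to_torch([data])); it is an assumption, not the implementation from any of the listed projects:

import numpy as np
import torch

def to_torch(ndarray, cuda=False):
    # hypothetical helper: pass tensors through, convert numpy arrays or lists,
    # and optionally move the result to the GPU
    if torch.is_tensor(ndarray):
        tensor = ndarray
    elif isinstance(ndarray, np.ndarray):
        tensor = torch.from_numpy(ndarray)
    else:
        tensor = torch.tensor(ndarray)
    return tensor.cuda() if cuda else tensor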
Example #2
    def extract_feature(self, data_loader):
        print_freq = 50
        self.cnn_model.eval()
        self.att_model.eval()

        batch_time = AverageMeter()
        data_time = AverageMeter()
        end = time.time()

        allfeatures = 0
        allfeatures_raw = 0

        for i, (imgs, flows, _, _) in enumerate(data_loader):
            data_time.update(time.time() - end)
            imgs = to_torch(imgs)
            flows = to_torch(flows)
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            imgs = imgs.to(device)
            flows = flows.to(device)
            with torch.no_grad():
                if i == 0:
                    out_feat, out_raw = self.cnn_model(imgs, flows, self.mode)
                    out_feat, out_raw = self.att_model.selfpooling_model(out_feat, out_raw)
                    allfeatures = out_feat
                    allfeatures_raw = out_raw
                    preimgs = imgs
                    preflows = flows
                elif imgs.size(0) < data_loader.batch_size:
                    flaw_batchsize = imgs.size(0)
                    cat_batchsize = data_loader.batch_size - flaw_batchsize
                    imgs = torch.cat((imgs, preimgs[0:cat_batchsize]), 0)
                    flows = torch.cat((flows, preflows[0:cat_batchsize]), 0)

                    out_feat, out_raw = self.cnn_model(imgs, flows, self.mode)
                    out_feat, out_raw = self.att_model.selfpooling_model(out_feat, out_raw)

                    out_feat = out_feat[0:flaw_batchsize]
                    out_raw = out_raw[0:flaw_batchsize]

                    allfeatures = torch.cat((allfeatures, out_feat), 0)
                    allfeatures_raw = torch.cat((allfeatures_raw, out_raw), 0)
                else:
                    out_feat, out_raw = self.cnn_model(imgs, flows, self.mode)
                    out_feat, out_raw = self.att_model.selfpooling_model(out_feat, out_raw)

                    allfeatures = torch.cat((allfeatures, out_feat), 0)
                    allfeatures_raw = torch.cat((allfeatures_raw, out_raw), 0)

            batch_time.update(time.time() - end)
            end = time.time()

            if (i + 1) % print_freq == 0:
                print('Extract Features: [{}/{}]\t'
                      'Time {:.3f} ({:.3f})\t'
                      'Data {:.3f} ({:.3f})\t'
                      .format(i + 1, len(data_loader),
                              batch_time.val, batch_time.avg,
                              data_time.val, data_time.avg))

        return allfeatures, allfeatures_raw
Example #3
File: models.py  Project: zphilip/MOGP-AL
    def _prep_train_data(self, x, y_ind, y):
        # prepare training data
        self._train_x = to_torch(x)
        self._train_y_ind = to_torch(y_ind).long()
        train_y = to_torch(y)

        # single mean estimate across all categories
        self._train_y_mean = train_y.mean()
        self._train_y = train_y - self._train_y_mean
Example #4
    def extract_feature(self, data_loader):
        print_freq = 50
        self.cnn_model.eval()
        self.att_model.eval()

        batch_time = AverageMeter()
        data_time = AverageMeter()
        end = time.time()

        allfeatures = 0
        allfeatures_raw = 0

        for i, (imgs, flows, _, _) in enumerate(data_loader):
            data_time.update(time.time() - end)
            imgs = to_torch(imgs).to(self.device)
            flows = to_torch(flows).to(self.device)

            with torch.no_grad():
                if i == 0:
                    out_feat, out_raw = self.cnn_model(imgs, flows, self.mode)
                    allfeatures = [out_feat]
                    allfeatures_raw = [out_raw]
                    preimgs = imgs
                    preflows = flows
                elif imgs.size(0) < data_loader.batch_size:
                    flaw_batchsize = imgs.size(0)
                    cat_batchsize = data_loader.batch_size - flaw_batchsize
                    imgs = torch.cat((imgs, preimgs[0:cat_batchsize]), 0)
                    flows = torch.cat((flows, preflows[0:cat_batchsize]), 0)

                    out_feat, out_raw = self.cnn_model(imgs, flows, self.mode)

                    out_feat = out_feat[0:flaw_batchsize]
                    out_raw = out_raw[0:flaw_batchsize]

                    allfeatures.append(out_feat)
                    allfeatures_raw.append(out_raw)
                else:
                    out_feat, out_raw = self.cnn_model(imgs, flows, self.mode)

                    allfeatures.append(out_feat)
                    allfeatures_raw.append(out_raw)

            batch_time.update(time.time() - end)
            end = time.time()

            if (i + 1) % print_freq == 0:
                print('Extract Features: [{}/{}]\t'
                      'Time {:.3f} ({:.3f})\t'
                      'Data {:.3f} ({:.3f})\t'
                      .format(i + 1, len(data_loader),
                              batch_time.val, batch_time.avg,
                              data_time.val, data_time.avg))

        allfeatures = torch.cat(allfeatures, 0)
        allfeatures_raw = torch.cat(allfeatures_raw, 0)
        return allfeatures, allfeatures_raw
Example #5
 def _fit_net(self, generator, n_steps):
     self.net.train()  # train mode
     for i, (X_batch, y_batch) in enumerate(islice(generator, n_steps)):
         X_batch = to_torch(X_batch, cuda=self.cuda_flag)
         y_batch = to_torch(y_batch, cuda=self.cuda_flag)
         self.net_optimizer.zero_grad()  # zero-out the gradients because they accumulate by default
         y_pred = self.net.forward(X_batch)
         loss = self.net_criterion(y_pred, y_batch)
         self.net_loss.append(loss.item())
         loss.backward()  # compute gradients
         self.net_optimizer.step()  # update params
     return self
Example #6
File: models.py  Project: sumitsk/algp
    def set_train_data(self, x, y, var=None):
        self._train_x = to_torch(x)
        self._train_y = to_torch(y)
        self._train_y_mean = self._train_y.mean()
        self._zero_mean_train_y = self._train_y - self._train_y_mean
        if var is not None:
            self._train_var = to_torch(var)

        if self.model is not None:
            self.model.set_train_data(inputs=self._train_x,
                                      targets=self._zero_mean_train_y,
                                      strict=False)
Example #7
def group_sample(sample, sample_spec, phase="train"):
    """ Creates the Torch tensors for a sample """

    inputs = sample_spec.get_inputs()
    labels = sample_spec.get_labels()
    masks  = sample_spec.get_masks()

    input_vars = [utils.to_torch(sample[k], block=True) for k in inputs]
    label_vars = [utils.to_torch(sample[k], block=False) for k in labels]
    mask_vars  = [utils.to_torch(sample[k], block=False) for k in masks]

    return input_vars, label_vars, mask_vars
Example #8
def accuracy(output, target, topk=(1, )):
    output, target = to_torch(output), to_torch(target)
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    ret = []
    for k in topk:
        correct_k = correct[:k].view(-1).float().sum(0)
        ret.append(correct_k.mul_(1. / batch_size))
    return ret
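Example #8 (and the identical Example #9 below) computes top-k accuracy. A minimal usage sketch; the tensor shapes and names here are illustrative assumptions, not taken from either project:

import torch

# a batch of 32 samples over 10 classes
logits = torch.randn(32, 10)
labels = torch.randint(0, 10, (32,))
# top-1 and top-5 accuracy as fractions in [0, 1]
top1, top5 = accuracy(logits, labels, topk=(1, 5))
print(top1.item(), top5.item())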
Example #9
def accuracy(output, target, topk=(1,)):
    output, target = to_torch(output), to_torch(target)
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    ret = []
    for k in topk:
        correct_k = correct[:k].view(-1).float().sum(0)
        ret.append(correct_k.mul_(1. / batch_size))
    return ret
Example #10
 def __init__(self,
              vocab_size,
              word_embedding_size,
              hidden_size,
              bidirectional=False,
              input_dropout_p=0,
              dropout_p=0,
              n_layers=1,
              rnn_type='lstm',
              variable_lengths=True,
              pretrain=False):
     super(RNNEncoder, self).__init__()
     self.variable_lengths = variable_lengths
     if pretrain is True:
         embedding_mat = np.load('./data/word_embedding/embed_matrix.npy')
         self.embedding = nn.Embedding.from_pretrained(
             to_torch(embedding_mat).cuda(), freeze=False)
     else:
         self.embedding = nn.Embedding(vocab_size, word_embedding_size)
     self.input_dropout = nn.Dropout(input_dropout_p)
     self.rnn_type = rnn_type
     self.rnn = getattr(nn, rnn_type.upper())(word_embedding_size,
                                              hidden_size,
                                              n_layers,
                                              batch_first=True,
                                              bidirectional=bidirectional,
                                              dropout=dropout_p)
     self.num_dirs = 2 if bidirectional else 1
Example #11
def extract_cnn_feature(model, inputs, norm_test=True):
    model.eval()
    inputs = to_torch(inputs)
    inputs = Variable(inputs)
    outputs, _, _ = model(inputs, Norm_test=norm_test)
    outputs = outputs.data.cpu()
    return outputs
Example #12
    def extract_feature(self, data_loader):  # 2
        # print_freq = 50
        self.cnn_model.eval()

        qf = []
        # qf_raw = []

        for i, inputs in enumerate(data_loader):
            imgs, _, _ = inputs
            b, n, s, c, h, w = imgs.size()
            imgs = imgs.view(b * n, s, c, h, w)
            imgs = to_torch(imgs)  # torch.Size([8, 8, 3, 256, 128])
            # flows = to_torch(flows)  # torch.Size([8, 8, 3, 256, 128])
            device = torch.device(
                "cuda:0" if torch.cuda.is_available() else "cpu")
            imgs = imgs.to(device)
            # flows = flows.to(device)
            with torch.no_grad():
                out_feat, out_raw = self.cnn_model(imgs)
                allfeatures = out_feat.view(n, -1)  # torch.Size([8, 128])
                # allfeatures_raw = out_raw.view(n, -1)  # torch.Size([8, 128])
                allfeatures = torch.mean(allfeatures,
                                         0).data.cpu()  # aggregate the sequence features by averaging
                # allfeatures_raw = torch.mean(allfeatures_raw, 0).data
                qf.append(allfeatures)
                # qf_raw.append(allfeatures_raw)
        qf = torch.stack(qf)
        #    qf_raw = torch.stack(allfeatures_raw)

        print(
            "Extracted features for query/gallery set, obtained {}-by-{} matrix"
            .format(qf.size(0), qf.size(1)))
        return qf
Example #13
File: env_loop.py  Project: nnaisense/MAGE
 def _reset(self):
     self._env = self._get_env()  # Create new env every episode. This is a way to prevent checkpointing from having to save env state
     if self.torch_np_conversion:
         self._state = to_torch(self._env.reset())
     else:
         self._state = self._env.reset()
     self._step_i = 0
Example #14
 def compute_test(best_model):
     model = best_model
     train_predict, hx = model(to_torch(train_x))
     train_predict = train_predict.detach().numpy()
     val_predict, hx = model(to_torch(validate_x), hx)
     test_predict, _ = model(to_torch(test_x), hx)
     test_predict = test_predict.detach().numpy()
     # invert predictions
     test_predict_r = scaler.inverse_transform(test_predict[:, 0, :])
     test_y_r = scaler.inverse_transform(test_y[:, 0, :])
     # calculate error
     test_rmse = math.sqrt(
         mean_squared_error(test_y_r[:, 0], test_predict_r[:, 0]))
     test_mape = (abs(
         (test_predict_r[:, 0] - test_y_r[:, 0]) / test_y_r[:, 0])).mean()
     test_mae = mean_absolute_error(test_predict_r[:, 0], test_y_r[:, 0])
     return test_rmse, test_mape, test_mae
Example #15
File: models.py  Project: zphilip/MOGP-AL
    def predict(self, x, y_ind, return_var=False, return_ent=False):
        # in absence of any training data
        if self.model is None:
            if return_ent:
                return np.full(len(x), 0.0)
            elif return_var:
                # assuming rbf kernel with scale = 1
                return np.full(len(x), 0.0), np.full(len(x), 1.0)
            else:
                raise NotImplementedError(
                    'Predictive distribution can not be estimated in absence of training data'
                )

        self.model.eval()
        self.likelihood.eval()
        ind_ = to_torch(y_ind).long()

        # TODO: for fast variance computation, add all the relevant flags
        # fast_pred_var uses LOVE
        with torch.no_grad():
            x_ = to_torch(x)
            if len(self._train_x) > 10:
                with gpytorch.fast_pred_var():
                    pred_grv = self.likelihood(self.model(x_, ind_))
            else:
                pred_grv = self.likelihood(self.model(x_, ind_))

            if return_ent:
                return entropy_from_cov(
                    pred_grv.covariance_matrix.cpu().numpy())

            # single mean
            mu = pred_grv.mean + self._train_y_mean

            # category-wise mean
            # mu = pred_grv.mean + torch.gather(self._train_y_mean, 0, ind_)

            mu = mu.cpu().numpy()
            if return_var:
                var = pred_grv.variance.cpu().numpy()
                return mu, var
        return mu
Example #16
    def step(self, action):
        state, reward, done, info = self.env.step(action)

        with torch.no_grad():  # Just to be sure...
            t_state = state if self.mode == "torch" else to_torch(state)
            done_condition = bool(self.env.unwrapped.is_done(
                t_state).item())  # done due to termination conditions
        done_timelimit = done if done else False
        done = done_condition or done_timelimit

        return state, reward, done, info
Example #17
 def _predict_proba(self, X):
     y_proba = []
     self.net.eval()  # evaluation mode
     for X_batch in OneEpoch(X, batch_size=self.batch_size):
         X_batch = X_batch.astype(np.float32)
         with torch.no_grad():
             X_batch = to_torch(X_batch, cuda=self.cuda_flag)
             proba_batch = F.softmax(self.net.forward(X_batch), dim=1).cpu().data.numpy()
         y_proba.extend(proba_batch)
     y_proba = np.array(y_proba)
     return y_proba
Example #18
 def _fit_combined(self, generator, recovery_generator, n_steps):
     self.net.train()  # train mode
     self.adv_net.train()  # train mode
     for i, (X_batch, y_batch, z_batch) in enumerate(islice(generator, n_steps)):
         X_batch = to_torch(X_batch, cuda=self.cuda_flag)
         y_batch = to_torch(y_batch, cuda=self.cuda_flag)
         z_batch = to_torch(z_batch, cuda=self.cuda_flag)
         self.net_optimizer.zero_grad()  # zero-out the gradients because they accumulate by default
         y_pred = self.net.forward(X_batch)
         z_pred = self.adv_net.forward(y_pred)
         net_loss = self.net_criterion(y_pred, y_batch)
         adv_loss = self.adv_criterion(z_pred, z_batch)
         loss = net_loss - (self.trade_off * adv_loss)
         self.adv_loss.append(adv_loss.item())
         self.net_loss.append(net_loss.item())
         self.comb_loss.append(loss.item())
         loss.backward()  # compute gradients
         self.net_optimizer.step()  # update params
         # Adversarial recovery
         self._fit_recovery(recovery_generator, self.n_recovery_steps)
     return self
Example #19
File: env_loop.py  Project: nnaisense/MAGE
    def step(self, action, video_file_suffix=None):
        """ Performs a single step in the environment
            Args:
                action (numpy[d_action])
                video_file_suffix (string, optional, default=None): Suffix added to the end of the video file name

            Returns:
                 (s, s', done) transition

                old_state (torch Tensor[1, d_state])
                next_states (torch Tensor[1, d_state])
                dones (boolean[1]): indicates if the episode has terminated (either due to a termination condition (is_done) or the time limit)
        """
        if self._state is None:
            self._reset()

        if self._record:
            video_file_full_path = self._video_file_base.format(
                video_file_suffix)
            next_state, _, done, info = self.env.step(
                action,
                filename=video_file_full_path,
                record_episode=self._record_next_episode)
        else:
            next_state, _, done, info = self.env.step(action)

        if self.torch_np_conversion:
            next_state = to_torch(next_state)
        old_state = self._state.detach()
        self._state = next_state.detach()  # For more safety (not required, in principle)
        self._step_i += 1

        if self._render:
            self.env.render()

        # Note: at the end of the episode next_state != self.state. The former is the part of the
        # transition while the latter is the current state of the environment (after reset)
        if done:
            self.env.close()
            self._state = None

            if self._record:
                self._run.add_artifact(
                    video_file_full_path
                )  # save video to sacred DB  # TODO WJ: This is not nice. Why EnvLoop should know about _run?
                self._record_next_episode = self._record_in_queue  # if there's a pending recording in the queue, record it on the next episode
                self._record_in_queue = False

        return old_state, next_state, done
Example #20
File: models.py  Project: sumitsk/algp
    def predict(self, x, return_cov=False, return_std=False):
        # returns posterior distribution conditioned on training data
        # call set_train_data method to set a different training data
        self.model.eval()
        self.likelihood.eval()
        x_ = to_torch(x)

        with torch.no_grad():
            pred = self.likelihood(self.model(x_))
            pred_mean = (pred.mean() + self._train_y_mean).cpu().numpy()
            if return_std:
                return pred_mean, pred.covar().diag().cpu().numpy()
            elif return_cov:
                return pred_mean, pred.covar().evaluate().cpu().numpy()
            return pred_mean
Example #21
    def pred(self, tensor, z=None):
        X = tensor[:, self.predictors].numpy()
        if self.interaction:
            X = self.poly.fit_transform(X)
        if self.regime:
            if z is None:
                X_extra = X.reshape(X.shape[0], 1,
                                    -1).repeat(len(self.predictors),
                                               1).reshape(X.shape[0], -1)
                X = np.hstack([X, X_extra])
            else:
                X = self.add_regime_ind(X, z)

        y_pred = to_torch(self.model.predict(X))

        return y_pred
Example #22
def run(agent_):
    env.reset()
    locs = [env.get_agent_loc()]
    cumulative_reward, step = 0, 0
    while step < max_steps:
        # get current state
        s_t = to_torch(env.get_agent_loc().reshape(1, -1))
        # take an action
        a_t, prob_a_t = agent_.choose_action(s_t)
        # get a reward, make env transition
        r_t = env.step(a_t)
        # updates
        cumulative_reward += r_t * gamma**step
        step += 1
        locs.append(env.get_agent_loc())
        # termination condition
        if env.is_terminal():
            break
    return cumulative_reward, step, locs
Example #23
def extract_cnn_feature(model, inputs, pool_feature=False, org_feature=False):
    model.eval()
    with torch.no_grad():
        inputs = to_torch(inputs)
        inputs = Variable(inputs).cuda()
        if pool_feature is False:
            outputs = model(inputs, rot=False, org_feature=org_feature)
            return outputs
        else:
            # Register a forward hook to capture the pooled feature
            outputs = {}

            def func(m, i, o):
                outputs['pool_feature'] = o.data.view(o.size(0), -1)

            hook = model.module._modules.get('features').register_forward_hook(func)
            model(inputs)
            hook.remove()
            # print(outputs['pool_feature'].shape)
            return outputs['pool_feature']
Example #24
def extract_cnn_feature(model, inputs, modules=None):
    model.eval()
    inputs = to_torch(inputs)
    inputs = Variable(inputs, volatile=True).cuda()
    if modules is None:
        outputs = model(inputs)
        outputs = outputs.data.cpu()
        return outputs
    # Register forward hook for each module
    outputs = OrderedDict()
    handles = []
    for m in modules:
        outputs[id(m)] = None

        def func(m, i, o):
            outputs[id(m)] = o.data.cpu()

        handles.append(m.register_forward_hook(func))
    model(inputs)
    for h in handles:
        h.remove()
    return list(outputs.values())
Example #25
    def extract_feature(self, data_loader):  # 2
        # print_freq = 50
        self.cnn_model.eval()

        qf = []
        # qf_raw = []

        for i, inputs in enumerate(data_loader):
            feature = []
            imgs, _, _ = inputs
            b, n, s, c, h, w = imgs.size()  # torch.Size([9, 16, 5, 256, 128])
            imgs = imgs.view(b * n, s, c, h, w)
            imgs = to_torch(imgs)  # torch.Size([9, 16, 5, 256, 128])
            imgseq_len = imgs.size(0)

            device = torch.device(
                "cuda:0" if torch.cuda.is_available() else "cpu")

            # flows = flows.to(device)
            with torch.no_grad():
                for i in range(imgseq_len):
                    img = imgs[i].to(device)  # torch.Size([16, 5, 256, 128])
                    out_feat, _, _, _ = self.cnn_model(img, img)  # [1, 128]
                    out_feat = out_feat.squeeze().data.cpu()
                    feature.extend(out_feat)
            features = np.array(feature)
            features = features.reshape(imgseq_len, -1)
            features = np.average(features, 0)
            # allfeatures = features.tolist()
            allfeatures = torch.from_numpy(features).unsqueeze(0)
            qf.append(allfeatures)
        qf = torch.stack(qf)
        #    qf_raw = torch.stack(allfeatures_raw)

        print(
            "Extracted features for query/gallery set, obtained {}-by-{} matrix"
            .format(qf.size(0), qf.size(1)))
        return qf
Example #26
def user_mem_init(u_id, device, feature_mem, loading_model, alpha):
    """
    Initialize user memory cube with personalized bias term and attention values
    :param u_id: User ID
    :param device: Device choice
    :param feature_mem: Feature-specific memory component
    :param loading_model: Loaded model
    :param alpha: Hyper-parameter
    :return: Personalized bias term and attention values
    """
    # Path to raw processed data (in Pickle files)
    path = 'data_prep/processed_data/raw/'
    # Load the Pickle files
    u_x1_data = pickle.load(
        open('{}sample_{}_x1.p'.format(path, str(u_id)), 'rb'))
    # Convert the user data into PyTorch tensor
    u_x1 = to_torch([u_x1_data]).to(device)
    # Get user profile matrix
    pu = loading_model(u_x1)
    # Retrieve the personalized bias term and the attention values
    personalized_bias_term, att_values = feature_mem.read_head(pu, alpha)
    # Delete variables to save storage
    del u_x1_data, u_x1, pu
    return personalized_bias_term, att_values
Example #27
    def evaluate(self, query_loader, gallery_loader, queryinfo, galleryinfo):

        self.cnn_model.eval()
        self.att_model.eval()
        self.classifier_model.eval()

        querypid = queryinfo.pid  # <class 'list'>: [74, 20, 90, 151, 1, 69, 84, 149, 5, 111, -1,..., 71, 139, 36]
        querycamid = queryinfo.camid   # [0, 0, 0 ,...., 0]
        querytranum = queryinfo.tranum  # <class 'list'>: [35, 21, 24, 26, 28, 38, 30, 32, 1, ..., 20, 31, 25, 29]
        gallerypid = galleryinfo.pid  # # <class 'list'>: [74, 20, 90, 151, 1, 69, 84, 149, 5, 111, -1,..., 71, 139, 36]
        gallerycamid = galleryinfo.camid  # [1, 1, 1 ,...., 1]
        gallerytranum = galleryinfo.tranum  # <class 'list'>: [19, 11, 23, 25, 20,  12,..2, 27, 17]

        query_resfeatures, query_resraw = self.extract_feature(query_loader)  # torch.Size([2787, 8, 128])
        gallery_resfeatures, gallery_resraw = self.extract_feature(gallery_loader)  # torch.Size([2006, 8, 128])

        querylen = len(querypid)  # 100
        gallerylen = len(gallerypid)  # 100

        # online gallery extraction
        single_distmat = np.zeros((querylen, gallerylen))  # <class 'tuple'>: (100, 100)

        q_start = 0
        pooled_query = []
        with torch.no_grad():
            for qind, qnum in enumerate(querytranum):
                query_feat_tmp = query_resfeatures[q_start:q_start+qnum, :, :]  # torch.Size([35, 8, 128])
                query_featraw_tmp = query_resraw[q_start:q_start+qnum, :, :]  # torch.Size([35, 8, 2048])
                pooled_query_tmp, hidden_query_tmp = self.att_model.selfpooling_model(query_feat_tmp, query_featraw_tmp)  # torch.Size([35, 128])
                pooled_query.append(pooled_query_tmp)
                q_start += qnum
            pooled_query = torch.cat(pooled_query, 0)  # torch.Size([2787, 128])

        g_start = 0
        pooled_gallery = []
        with torch.no_grad():
            for gind, gnum in enumerate(gallerytranum):
                gallery_feat_tmp = gallery_resfeatures[g_start:g_start+gnum, :, :]  # torch.Size([19, 8, 128])
                gallery_featraw_tmp = gallery_resraw[g_start:g_start+gnum, :, :]  # torch.Size([19, 8, 2048])
                pooled_gallery_tmp, hidden_gallery_tmp = self.att_model.selfpooling_model(gallery_feat_tmp, gallery_featraw_tmp)
                # torch.Size([19, 128])
                pooled_gallery.append(pooled_gallery_tmp)
                g_start += gnum
            pooled_gallery = torch.cat(pooled_gallery, 0)  # torch.Size([2006, 128])
        # pooled_query, hidden_query = self.att_model.selfpooling_model_1(query_resfeatures, query_resraw)
        # pooled_gallery, hidden_gallery = self.att_model.selfpooling_model_2(gallery_resfeatures, gallery_resraw)

        pooled_query_2 = self.getcrosspool(query_resfeatures, query_resraw, pooled_gallery, querytranum)
        pooled_query_2 = to_numpy(pooled_query_2)
        torch.cuda.empty_cache()
        pooled_gallery_2 = self.getcrosspool(gallery_resfeatures, gallery_resraw, pooled_query, gallerytranum)
        pooled_gallery_2 = to_numpy(pooled_gallery_2)
        torch.cuda.empty_cache()

        pooled_query_2 = to_torch(pooled_query_2).to(self.device)
        pooled_gallery_2 = to_torch(pooled_gallery_2).to(self.device)

        # q_start = 0
        # pooled_query_2 = []
        # with torch.no_grad():
        #     for qind, qnum in enumerate(querytranum):
        #         query_feat_tmp = query_resfeatures[q_start:q_start+qnum, :, :]
        #         query_featraw_tmp = query_resraw[q_start:q_start+qnum, :, :]
        #         pooled_query_2_tmp = self.att_model.crosspooling_model(query_feat_tmp, query_featraw_tmp, pooled_gallery)
        #         pooled_query_2.append(pooled_query_2_tmp)
        #         q_start += qnum
        #         torch.cuda.empty_cache()
        #
        #     torch.cuda.empty_cache()
        #     pooled_query_2 = torch.cat(pooled_query_2, 1)
        #     torch.cuda.empty_cache()


        # g_start = 0
        # pooled_gallery_2 = []
        # with torch.no_grad():
        #     for gind, gnum in enumerate(gallerytranum):
        #         gallery_feat_tmp = gallery_resfeatures[g_start:g_start+gnum, :, :]  # torch.Size([19, 8, 128])
        #         gallery_featraw_tmp = gallery_resraw[g_start:g_start+gnum, :, :]  # torch.Size([19, 8, 2048])
        #         pooled_gallery_2_tmp = self.att_model.crosspooling_model(gallery_feat_tmp, gallery_featraw_tmp, pooled_query)
        #         # torch.Size([2787, 19, 128])
        #         pooled_gallery_2.append(pooled_gallery_2_tmp)
        #         g_start += gnum
        #         torch.cuda.empty_cache()
        #
        #     torch.cuda.empty_cache()
        #     pooled_gallery_2 = torch.cat(pooled_gallery_2, 1)

        pooled_query_2 = pooled_query_2.permute(1, 0, 2)  # torch.Size([2787, 2006, 128])
        pooled_query, pooled_gallery = pooled_query.unsqueeze(1), pooled_gallery.unsqueeze(0)  # torch.Size([2787, 1, 128])  torch.Size([1, 2006, 128])

        with torch.no_grad():
            encode_scores = self.classifier_model(pooled_query, pooled_gallery_2, pooled_query_2, pooled_gallery)

        encode_scores = to_torch(encode_scores).to(self.device)
        encode_size = encode_scores.size()  # torch.Size([2787, 2006, 2])
        encodemat = encode_scores.view(-1, 2)  # torch.Size([5590722, 2])
        encodemat = F.softmax(encodemat)
        encodemat = encodemat.view(encode_size[0], encode_size[1], 2)  # torch.Size([2787, 2006, 2])

        single_distmat_all = encodemat[:, :, 0]  # torch.Size([2787, 2006])
        single_distmat_all = single_distmat_all.data.cpu().numpy()
        q_start, g_start = 0, 0
        for qind, qnum in enumerate(querytranum):
            for gind, gnum in enumerate(gallerytranum):
                distmat_qg = single_distmat_all[q_start:q_start+qnum, g_start:g_start+gnum]
                percile = np.percentile(distmat_qg, 20)
                if distmat_qg[distmat_qg <= percile].size > 0:
                    distmean = np.mean(distmat_qg[distmat_qg <= percile])
                else:
                    distmean = np.mean(distmat_qg)

                single_distmat[qind, gind] = distmean
                g_start = g_start + gnum
            g_start = 0
            q_start = q_start + qnum

        return evaluate_seq(single_distmat, querypid, querycamid, gallerypid, gallerycamid)
Example #28
# prealloc
log_return = []
log_steps = []
for i in range(n_trials):

    env.reset()
    cumulative_reward = 0
    step = 0

    while step < max_steps:
        if step == 0:
            h_prev = get_init_state()

        # get current state to predict action value
        s_t = to_torch(env.get_agent_loc().reshape(1, -1))
        out_t, h_t = rnn(s_t.view(1, 1, -1), h_prev)
        q_t = readout(out_t)

        # epsilon greedy action selection
        if np.random.uniform() > epsilon:
            a_t = torch.argmax(q_t)
        else:
            a_t = np.random.randint(n_actions)
        # transition and get reward
        r_t = env.step(a_t)

        # get next states info
        s_next = to_torch(env.get_agent_loc().reshape(1, -1))
        out_next, _ = rnn(s_next.view(1, 1, -1), h_t)
        q_next = readout(out_next)
Example #29
    def evaluate(self, query_loader, gallery_loader, queryinfo, galleryinfo):

        self.cnn_model.eval()
        self.att_model.eval()
        self.classifier_model.eval()

        querypid = queryinfo.pid
        querycamid = queryinfo.camid
        querytranum = queryinfo.tranum

        gallerypid = galleryinfo.pid
        gallerycamid = galleryinfo.camid
        gallerytranum = galleryinfo.tranum

        pooled_probe, hidden_probe = self.extract_feature(query_loader)

        querylen = len(querypid)
        gallerylen = len(gallerypid)

        # online gallery extraction
        single_distmat = np.zeros((querylen, gallerylen))
        gallery_resize = 0
        gallery_popindex = 0
        gallery_popsize = gallerytranum[gallery_popindex]

        gallery_resfeatures = 0
        gallery_resraw = 0

        gallery_empty = True
        preimgs = 0
        preflows = 0

        # time
        gallery_time = AverageMeter()
        end = time.time()

        for i, (imgs, flows, _, _) in enumerate(gallery_loader):
            imgs = to_torch(imgs)
            flows = to_torch(flows)
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            imgs = imgs.to(device)
            flows = flows.to(device)

            with torch.no_grad():
                seqnum = imgs.size(0)
                if i == 0:
                    preimgs = imgs
                    preflows = flows

                if gallery_empty:
                    out_feat, out_raw = self.cnn_model(imgs, flows, self.mode)

                    gallery_resfeatures = out_feat
                    gallery_resraw = out_raw

                    gallery_empty = False

                elif imgs.size(0) < gallery_loader.batch_size:
                    flaw_batchsize = imgs.size(0)
                    cat_batchsize = gallery_loader.batch_size - flaw_batchsize
                    imgs = torch.cat((imgs, preimgs[0:cat_batchsize]), 0)
                    flows = torch.cat((flows, preflows[0:cat_batchsize]), 0)
                    out_feat, out_raw = self.cnn_model(imgs, flows, self.mode)

                    out_feat = out_feat[0:flaw_batchsize]
                    out_raw  = out_raw[0:flaw_batchsize]

                    gallery_resfeatures = torch.cat((gallery_resfeatures, out_feat), 0)
                    gallery_resraw = torch.cat((gallery_resraw, out_raw), 0)

                else:
                    out_feat, out_raw = self.cnn_model(imgs, flows, self.mode)

                    gallery_resfeatures = torch.cat((gallery_resfeatures, out_feat), 0)
                    gallery_resraw = torch.cat((gallery_resraw, out_raw), 0)

            gallery_resize = gallery_resize + seqnum

            while gallery_popsize <= gallery_resize:

                if (gallery_popindex + 1) % 50 == 0:
                    print('gallery--{:04d}'.format(gallery_popindex))
                gallery_popfeatures = gallery_resfeatures[0:gallery_popsize, :]
                gallery_popraw = gallery_resraw[0:gallery_popsize, :]

                if gallery_popsize < gallery_resize:
                    gallery_resfeatures = gallery_resfeatures[gallery_popsize:gallery_resize, :]
                    gallery_resraw = gallery_resraw[gallery_popsize:gallery_resize, :]
                else:
                    gallery_resfeatures = 0
                    gallery_resraw = 0
                    gallery_empty = True

                gallery_resize = gallery_resize - gallery_popsize

                pooled_gallery, pooled_raw = self.att_model.selfpooling_model(gallery_popfeatures, gallery_popraw)
                probesize = pooled_probe.size()
                gallerysize = pooled_gallery.size()
                probe_batch = probesize[0]
                gallery_batch = gallerysize[0]
                gallery_num = gallerysize[1]
                pooled_gallery = pooled_gallery.unsqueeze(0)
                pooled_gallery = pooled_gallery.expand(probe_batch, gallery_batch, gallery_num)

                encode_scores = self.classifier_model(pooled_probe, pooled_gallery)

                encode_size = encode_scores.size()
                encodemat = encode_scores.view(-1, 2)
                encodemat = F.softmax(encodemat)
                encodemat = encodemat.view(encode_size[0], encode_size[1], 2)
                distmat_qall_g = encodemat[:, :, 0]

                q_start = 0
                for qind, qnum in enumerate(querytranum):
                    distmat_qg = distmat_qall_g[q_start:q_start + qnum, :]
                    distmat_qg = distmat_qg.data.cpu().numpy()
                    percile = np.percentile(distmat_qg, 20)

                    if distmat_qg[distmat_qg <= percile].size > 0:
                        distmean = np.mean(distmat_qg[distmat_qg <= percile])
                    else:
                        distmean = np.mean(distmat_qg)

                    single_distmat[qind, gallery_popindex] = distmean
                    q_start = q_start + qnum

                gallery_popindex = gallery_popindex + 1

                if gallery_popindex < gallerylen:

                    gallery_popsize = gallerytranum[gallery_popindex]
                gallery_time.update(time.time() - end)
                end = time.time()

        return evaluate_seq(single_distmat, querypid, querycamid, gallerypid, gallerycamid)
Example #30
def make_variables(inputs):
    expanded = [np.expand_dims(arr, axis=0) for (k, arr) in inputs.items()]
    return [utils.to_torch(arr) for arr in expanded]
Example #31
    def evaluate(self, query_loader, gallery_loader, queryinfo, galleryinfo):

        self.cnn_model.eval()
        self.att_model.eval()
        self.classifier_model.eval()

        querypid = queryinfo.pid
        querycamid = queryinfo.camid
        querytranum = queryinfo.tranum

        gallerypid = galleryinfo.pid
        gallerycamid = galleryinfo.camid
        gallerytranum = galleryinfo.tranum

        pooled_probe, hidden_probe = self.extract_feature(query_loader)

        querylen = len(querypid)
        gallerylen = len(gallerypid)

        # online gallery extraction
        single_distmat = np.zeros((querylen, gallerylen))
        gallery_resize = 0
        gallery_popindex = 0
        gallery_popsize = gallerytranum[gallery_popindex]

        gallery_resfeatures = 0
        gallery_resraw = 0

        gallery_empty = True
        preimgs = 0
        preflows = 0

        # time
        gallery_time = AverageMeter()
        end = time.time()

        for i, (imgs, flows, _, _) in enumerate(gallery_loader):
            imgs = to_torch(imgs)
            flows = to_torch(flows)
            device = torch.device(
                "cuda:0" if torch.cuda.is_available() else "cpu")
            imgs = imgs.to(device)
            flows = flows.to(device)

            with torch.no_grad():
                seqnum = imgs.size(0)
                if i == 0:
                    preimgs = imgs
                    preflows = flows

                if gallery_empty:
                    out_feat, out_raw = self.cnn_model(imgs, flows, self.mode)

                    gallery_resfeatures = out_feat
                    gallery_resraw = out_raw

                    gallery_empty = False

                elif imgs.size(0) < gallery_loader.batch_size:
                    flaw_batchsize = imgs.size(0)
                    cat_batchsize = gallery_loader.batch_size - flaw_batchsize
                    imgs = torch.cat((imgs, preimgs[0:cat_batchsize]), 0)
                    flows = torch.cat((flows, preflows[0:cat_batchsize]), 0)
                    out_feat, out_raw = self.cnn_model(imgs, flows, self.mode)

                    out_feat = out_feat[0:flaw_batchsize]
                    out_raw = out_raw[0:flaw_batchsize]

                    gallery_resfeatures = torch.cat(
                        (gallery_resfeatures, out_feat), 0)
                    gallery_resraw = torch.cat((gallery_resraw, out_raw), 0)

                else:
                    out_feat, out_raw = self.cnn_model(imgs, flows, self.mode)

                    gallery_resfeatures = torch.cat(
                        (gallery_resfeatures, out_feat), 0)
                    gallery_resraw = torch.cat((gallery_resraw, out_raw), 0)

            gallery_resize = gallery_resize + seqnum

            while gallery_popsize <= gallery_resize:

                if (gallery_popindex + 1) % 50 == 0:
                    print('gallery--{:04d}'.format(gallery_popindex))
                gallery_popfeatures = gallery_resfeatures[0:gallery_popsize, :]
                gallery_popraw = gallery_resraw[0:gallery_popsize, :]

                if gallery_popsize < gallery_resize:
                    gallery_resfeatures = gallery_resfeatures[
                        gallery_popsize:gallery_resize, :]
                    gallery_resraw = gallery_resraw[
                        gallery_popsize:gallery_resize, :]
                else:
                    gallery_resfeatures = 0
                    gallery_resraw = 0
                    gallery_empty = True

                gallery_resize = gallery_resize - gallery_popsize

                pooled_gallery, pooled_raw = self.att_model.selfpooling_model(
                    gallery_popfeatures, gallery_popraw)
                probesize = pooled_probe.size()
                gallerysize = pooled_gallery.size()
                probe_batch = probesize[0]
                gallery_batch = gallerysize[0]
                gallery_num = gallerysize[1]
                pooled_gallery = pooled_gallery.unsqueeze(0)
                pooled_gallery = pooled_gallery.expand(probe_batch,
                                                       gallery_batch,
                                                       gallery_num)

                encode_scores = self.classifier_model(pooled_probe,
                                                      pooled_gallery)

                encode_size = encode_scores.size()
                encodemat = encode_scores.view(-1, 2)
                encodemat = F.softmax(encodemat)
                encodemat = encodemat.view(encode_size[0], encode_size[1], 2)
                distmat_qall_g = encodemat[:, :, 0]

                q_start = 0
                for qind, qnum in enumerate(querytranum):
                    distmat_qg = distmat_qall_g[q_start:q_start + qnum, :]
                    distmat_qg = distmat_qg.data.cpu().numpy()
                    percile = np.percentile(distmat_qg, 20)

                    if distmat_qg[distmat_qg <= percile].size > 0:
                        distmean = np.mean(distmat_qg[distmat_qg <= percile])
                    else:
                        distmean = np.mean(distmat_qg)

                    single_distmat[qind, gallery_popindex] = distmean
                    q_start = q_start + qnum

                gallery_popindex = gallery_popindex + 1

                if gallery_popindex < gallerylen:

                    gallery_popsize = gallerytranum[gallery_popindex]
                gallery_time.update(time.time() - end)
                end = time.time()

        return evaluate_seq(single_distmat, querypid, querycamid, gallerypid,
                            gallerycamid)
Example #32
File: attevaluator.py  Project: zxr8192/GRL
    def extract_feature(self, data_loader):

        self.cnn_model.eval()
        self.siamese_model.eval()

        qf, q_pids, q_camids = [], [], []
        for i, inputs in enumerate(data_loader):
            imgs, pids, camids = inputs

            if self.only_eval:
                b, n, s, c, h, w = imgs.size()  # 1, 5, 8, c, h, w
                imgs = imgs.view(b * n, s, c, h, w).cuda()
                with torch.no_grad():
                    if b * n > 8:  # if the sequence is too long, split it into smaller chunks
                        feat_list = []  # temporary list to hold per-chunk features
                        num = int(math.ceil(b * n * 1.0 / 8))  # number of chunks
                        for y in range(num):
                            clips = imgs[
                                y * 8:(y + 1) *
                                8, :, :, :, :].cuda()  # 32, 8, c, h, w
                            x_uncorr, feats_corr = self.cnn_model(clips)

                            out_frame = self.siamese_model.self_attention(
                                feats_corr)
                            out_feat = torch.cat(
                                (x_uncorr, out_frame, feats_corr.mean(dim=1)),
                                dim=1)

                            feat_list.append(out_feat)
                        feat_list = torch.cat(feat_list, 0)
                        feat_list = torch.mean(feat_list, dim=0)
                        qf.append(feat_list.unsqueeze(0))
                        q_pids.extend(pids)
                        q_camids.extend(camids)
                    else:
                        x_uncorr, feats_corr = self.cnn_model(imgs)

                        out_frame = self.siamese_model.self_attention(
                            feats_corr)
                        out_feat = torch.cat(
                            (x_uncorr, out_frame, feats_corr.mean(dim=1)),
                            dim=1)

                        out_feat = out_feat.view(n, -1)
                        out_feat = torch.mean(out_feat, dim=0)
                        qf.append(out_feat.unsqueeze(0))
                        q_pids.extend(pids)
                        q_camids.extend(camids)
                torch.cuda.empty_cache()
            else:
                b, s, c, h, w = imgs.size()
                imgs = imgs.view(b, s, c, h, w)
                imgs = to_torch(imgs)

                device = torch.device(
                    "cuda:0" if torch.cuda.is_available() else "cpu")
                imgs = imgs.to(device)

                with torch.no_grad():
                    x_uncorr, feats_corr = self.cnn_model(imgs)

                    out_frame = self.siamese_model.self_attention(feats_corr)
                    out_feat = torch.cat(
                        (x_uncorr, out_frame, feats_corr.mean(dim=1)), dim=1)

                    qf.append(out_feat)
                    q_pids.extend(pids)
                    q_camids.extend(camids)
                torch.cuda.empty_cache()

        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)

        return qf, q_pids, q_camids
Example #33
    def extract_feature(self, data_loader):
        print_freq = 50
        self.cnn_model.eval()
        self.att_model.eval()

        batch_time = AverageMeter()
        data_time = AverageMeter()
        end = time.time()

        allfeatures = 0
        allfeatures_raw = 0

        for i, (imgs, flows, _, _) in enumerate(data_loader):
            data_time.update(time.time() - end)
            imgs = to_torch(imgs)
            flows = to_torch(flows)
            device = torch.device(
                "cuda:0" if torch.cuda.is_available() else "cpu")
            imgs = imgs.to(device)
            flows = flows.to(device)
            with torch.no_grad():
                if i == 0:
                    out_feat, out_raw = self.cnn_model(imgs, flows, self.mode)
                    out_feat, out_raw = self.att_model.selfpooling_model(
                        out_feat, out_raw)
                    allfeatures = out_feat
                    allfeatures_raw = out_raw
                    preimgs = imgs
                    preflows = flows
                elif imgs.size(0) < data_loader.batch_size:
                    flaw_batchsize = imgs.size(0)
                    cat_batchsize = data_loader.batch_size - flaw_batchsize
                    imgs = torch.cat((imgs, preimgs[0:cat_batchsize]), 0)
                    flows = torch.cat((flows, preflows[0:cat_batchsize]), 0)

                    out_feat, out_raw = self.cnn_model(imgs, flows, self.mode)
                    out_feat, out_raw = self.att_model.selfpooling_model(
                        out_feat, out_raw)

                    out_feat = out_feat[0:flaw_batchsize]
                    out_raw = out_raw[0:flaw_batchsize]

                    allfeatures = torch.cat((allfeatures, out_feat), 0)
                    allfeatures_raw = torch.cat((allfeatures_raw, out_raw), 0)
                else:
                    out_feat, out_raw = self.cnn_model(imgs, flows, self.mode)
                    out_feat, out_raw = self.att_model.selfpooling_model(
                        out_feat, out_raw)

                    allfeatures = torch.cat((allfeatures, out_feat), 0)
                    allfeatures_raw = torch.cat((allfeatures_raw, out_raw), 0)

            batch_time.update(time.time() - end)
            end = time.time()

            if (i + 1) % print_freq == 0:
                print('Extract Features: [{}/{}]\t'
                      'Time {:.3f} ({:.3f})\t'
                      'Data {:.3f} ({:.3f})\t'.format(i + 1, len(data_loader),
                                                      batch_time.val,
                                                      batch_time.avg,
                                                      data_time.val,
                                                      data_time.avg))

        return allfeatures, allfeatures_raw