Example #1
def evaluate(self, carray, issame, nrof_folds=5, tta=False):
     self.net.eval()
     idx = 0
     embeddings = np.zeros([len(carray), self.embedding_dim])
     with torch.no_grad():
         while idx + self.batch_size <= len(carray):
             batch = torch.tensor(carray[idx:idx + self.batch_size])
             if tta:
                 fliped = hflip_batch(batch)
                 emb_batch = self.net(batch.to(self.device)) + self.net(
                     fliped.to(self.device))
                  embeddings[idx:idx + self.batch_size] = l2_norm(emb_batch).cpu()
             else:
                 embeddings[idx:idx + self.batch_size] = self.net(
                     batch.to(self.device)).cpu()
             idx += self.batch_size
         if idx < len(carray):
             batch = torch.tensor(carray[idx:])
             if tta:
                 fliped = hflip_batch(batch)
                 emb_batch = self.net(batch.to(self.device)) + self.net(
                     fliped.to(self.device))
                  embeddings[idx:] = l2_norm(emb_batch).cpu()
             else:
                 embeddings[idx:] = self.net(batch.to(self.device)).cpu()
     tpr, fpr, accuracy, best_thresholds = evaluate(embeddings, issame,
                                                    nrof_folds)
     buf = gen_plot(fpr, tpr)
     roc_curve = Image.open(buf)
     roc_curve_tensor = trans.ToTensor()(roc_curve)
     self.net.train()
     return accuracy.mean(), best_thresholds.mean(), roc_curve_tensor
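The l2_norm helper these PyTorch snippets rely on is not shown on this page. Below is a minimal sketch consistent with how it is called above (row-wise normalization of an embedding batch); the exact upstream definition may differ:

import torch

def l2_norm(x, axis=1):
    # scale each embedding vector to unit Euclidean length
    return x / x.norm(p=2, dim=axis, keepdim=True)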
Example #2
    def nearest_neighbors(self, word: str, N=10):
        '''
        Get the `N` nearest neighbors of `word`, and their distances.

        Parameters
        ------------
        word: str
        
        N: int
            number of nearest neighbors to compute.

        Returns
        -----------
        (nearest_words, nearest_words_distances)

        '''
        word_idx = self.word2index[word]
        word_embedding = self.index2embedding[word_idx]
        if word_idx < 3 or l2_norm(word_embedding) <= self.blacklist_threshold:
            #  word is too close to zero vector
            return [], np.array([])
        distances = np.zeros(len(self.index2word))
        for index, embedding in enumerate(self.index2embedding):
            if index < 3 or l2_norm(embedding) <= self.blacklist_threshold:
                # ignore <PAD>, <UNK> and blacklisted words
                distance = np.inf
            else:
                distance = l2_norm(word_embedding - embedding)
            distances[index] = distance
        # get N nearest indexes, ignore word_idx
        nearest_indexes = np.argsort(distances)[1:N + 1]
        nearest_words = [self.index2word[idx] for idx in nearest_indexes]
        nearest_words_distances = distances[nearest_indexes]
        return nearest_words, nearest_words_distances
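A hypothetical usage sketch for the method above; the surrounding embedding class and the trained instance model are assumptions, not part of the snippet:

# assuming `model` is an instance of the class above, with
# word2index / index2embedding populated after training
words, dists = model.nearest_neighbors("coffee", N=5)
for w, d in zip(words, dists):
    print(f"{w}\t{d:.3f}")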
Example #3
def perform_val(embedding_size,
                batch_size,
                model,
                carray,
                issame,
                nrof_folds=10,
                is_ccrop=False,
                is_flip=True):
    """perform val"""
    embeddings = np.zeros([len(carray), embedding_size])

    for idx in tqdm.tqdm(range(0, len(carray), batch_size)):
        batch = carray[idx:idx + batch_size]
        batch = np.transpose(batch, [0, 2, 3, 1]) * 0.5 + 0.5
        if is_ccrop:
            batch = ccrop_batch(batch)
        if is_flip:
            fliped = hflip_batch(batch)
            emb_batch = model(batch) + model(fliped)
            embeddings[idx:idx + batch_size] = l2_norm(emb_batch)
        else:
            # the flag-gated center crop has already been applied above
            emb_batch = model(batch)
            embeddings[idx:idx + batch_size] = l2_norm(emb_batch)

    tpr, fpr, accuracy, best_thresholds = evaluate(embeddings, issame,
                                                   nrof_folds)

    return accuracy.mean(), best_thresholds.mean()
Example #4
    def forward_mf(self, inp, weights, name="mf_model"):
        """
        :param inp: batch of (user_id, item_id)
        :param weights:
        :param reuse:
        :param lamb_u:
        :param lamb_v:
        :param learning_r:
        :param name:
        :return:
        """
        U = weights['users']
        V = weights['items']
        with tf.name_scope(name):
            with tf.name_scope("batch_embeddings"):
                with tf.name_scope('batch_ids'):
                    u_indices = inp[:, 0]
                    v_indices = inp[:, 1]
                with tf.name_scope('batch_embeddings'):
                    u_batch = tf.nn.embedding_lookup(U, u_indices)
                    v_batch = tf.nn.embedding_lookup(V, v_indices)

            with tf.name_scope('preds'):
                predict_score = tf.reduce_sum(tf.multiply(u_batch, v_batch),
                                              axis=1,
                                              name='preds')

            # calculate loss
            with tf.name_scope("regularization"):
                reg = FLAGS.lamb_u * l2_norm(u_batch) + FLAGS.lamb_v * l2_norm(
                    v_batch)

            return predict_score, reg
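In the MF and BPR snippets, l2_norm acts as a scalar regularizer over a batch of embeddings. Below is a minimal sketch consistent with that usage (sum of squared entries); the real helper is not shown, so this is an assumption:

import tensorflow as tf

def l2_norm(t):
    # scalar L2 penalty: sum of squared entries of the batch embeddings
    return tf.reduce_sum(tf.square(t))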
Example #5
 def evaluate(self, cfg, carray, issame, nrof_folds=5, tta=False):
     self.model.eval()
     idx = 0
     embeddings = np.zeros([len(carray), cfg.MODEL.HEADS.EMBEDDING_DIM])
     batch_size = cfg.SOLVER.IMS_PER_BATCH
     with torch.no_grad():
         while idx + batch_size <= len(carray):
             batch = torch.tensor(carray[idx:idx + batch_size])
             if tta:
                 fliped = hflip_batch(batch)
                 emb_batch = self.model(batch.to(self.device)) + self.model(
                     fliped.to(self.device))
                 embeddings[idx:idx + batch_size] = l2_norm(emb_batch).cpu()
             else:
                 embeddings[idx:idx + batch_size] = self.model(
                     batch.to(self.device)).cpu()
             idx += batch_size
         if idx < len(carray):
             batch = torch.tensor(carray[idx:])
             if tta:
                 fliped = hflip_batch(batch)
                 emb_batch = self.model(batch.to(self.device)) + self.model(
                     fliped.to(self.device))
                 embeddings[idx:] = l2_norm(emb_batch).cpu()
             else:
                 embeddings[idx:] = self.model(batch.to(self.device)).cpu()
     tpr, fpr, accuracy, best_thresholds = scores(embeddings, issame,
                                                  nrof_folds)
     buf = gen_plot(fpr, tpr)
     roc_curve = Image.open(buf)
     roc_curve_tensor = trans.ToTensor()(roc_curve)
     return accuracy.mean(), best_thresholds.mean(), roc_curve_tensor
Example #6
def train(model,
          classifier,
          criterion,
          optimizer,
          trainloader,
          use_gpu,
          num_classes,
          epoch,
          args):
    model.train()
    losses = AverageMeter()

    if args.plot:
        all_features, all_labels = [], []

    for batch_idx, (data, labels) in enumerate(trainloader):
        if use_gpu:
            data, labels = data.cuda(), labels.cuda()
        features = model(data)
        outputs, _ = classifier(features, labels)
        loss = criterion(outputs, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        losses.update(loss.item(), labels.size(0))

        if args.plot_normalized:
            features = l2_norm(features)

        if args.plot:
            if use_gpu:
                all_features.append(features.data.cpu().numpy())
                all_labels.append(labels.data.cpu().numpy())
            else:
                all_features.append(features.data.numpy())
                all_labels.append(labels.data.numpy())

        if (batch_idx + 1) % args.print_freq == 0:
            print("Batch {}/{}\t Loss {:.6f} ({:.6f})"
                  .format(batch_idx + 1, len(trainloader), losses.val, losses.avg))

    if args.plot:
        weights = None
        centers = classifier.weight.data
        if args.plot_normalized:
            centers = l2_norm(classifier.weight.data)
        all_features = np.concatenate(all_features, 0)
        all_labels = np.concatenate(all_labels, 0)
        plot_features(all_features, weights, centers.cpu().numpy(), all_labels, num_classes, epoch, prefix='train', args=args)
Example #7
def main():
    df = pd.read_csv(os.path.join(OUTPUT_DIR,
                                  'features.csv')).sort_values('prediction')
    value_counts = df['prediction'].value_counts().to_dict()

    # compute positions
    border_index = (df['prediction'] == 0).sum()
    clothe_y = border_index // 2
    person_y = border_index + (len(df) - border_index) // 2
    x = df.shape[0] // 2

    text_props = dict(fontdict={
        'color': 'grey',
        'alpha': 0.3,
        'size': 30
    },
                      horizontalalignment='center',
                      verticalalignment='center')

    for layer in ['l2', 'l3', 'avgp']:
        print(f'Processing: {layer}')
        use_cols = [col for col in df.columns if col.startswith(layer)]
        vectors = l2_norm(df[use_cols].values[:, :])

        fig, ax = plt.subplots()
        ax = sns.heatmap(vectors, cmap='YlGnBu', ax=ax)
        ax.axhline(border_index)
        ax.text(x, clothe_y, 'Clothe ({})'.format(value_counts[0]),
                **text_props)
        ax.text(x, person_y, 'Person ({})'.format(value_counts[1]),
                **text_props)
        ax.set_xticks([])
        ax.set_yticks([])
        plt.savefig(os.path.join(OUTPUT_DIR, f'heatmap_{layer}.png'), dpi=1000)
Example #8
    def forward(self, x, label=None):
        x = self.relu1_1(self.conv1_1(x))
        x = x + self.relu1_3(self.conv1_3(self.relu1_2(self.conv1_2(x))))

        x = self.relu2_1(self.conv2_1(x))
        x = x + self.relu2_3(self.conv2_3(self.relu2_2(self.conv2_2(x))))
        x = x + self.relu2_5(self.conv2_5(self.relu2_4(self.conv2_4(x))))

        x = self.relu3_1(self.conv3_1(x))
        x = x + self.relu3_3(self.conv3_3(self.relu3_2(self.conv3_2(x))))
        x = x + self.relu3_5(self.conv3_5(self.relu3_4(self.conv3_4(x))))
        x = x + self.relu3_7(self.conv3_7(self.relu3_6(self.conv3_6(x))))
        x = x + self.relu3_9(self.conv3_9(self.relu3_8(self.conv3_8(x))))

        x = self.relu4_1(self.conv4_1(x))
        x = x + self.relu4_3(self.conv4_3(self.relu4_2(self.conv4_2(x))))

        x = x.view(x.size(0), -1)
        x = self.fc5(x)

        x = l2_norm(x)

        if label is None:
            return x
        else:
            x = self.am_softmax(x, label)
            return x
Example #9
    def infer(self, faces, target_embs, tta=False):
        '''
        faces : list of PIL Images
        target_embs : [n, 512] embeddings of the faces already in the facebank
        tta : test-time augmentation (horizontal flip only)
        '''
        embs = []
        for img in faces:
            if tta:
                mirror = trans.functional.hflip(img)
                emb = self.model(
                    self.test_transform(img).to(self.device).unsqueeze(0))
                emb_mirror = self.model(
                    self.test_transform(mirror).to(self.device).unsqueeze(0))
                embs.append(l2_norm(emb + emb_mirror))
            else:
                embs.append(
                    self.model(
                        self.test_transform(img).to(self.device).unsqueeze(0)))
        source_embs = torch.cat(embs)

        if isinstance(target_embs, list):
            tmp = []
            for img in target_embs:
                if tta:
                    mirror = trans.functional.hflip(img)
                    # distinct names keep the accumulator list `tmp` intact
                    emb = self.model(
                        self.test_transform(img).to(self.device).unsqueeze(0))
                    emb_mirror = self.model(
                        self.test_transform(mirror).to(
                            self.device).unsqueeze(0))
                    tmp.append(l2_norm(emb + emb_mirror))
                else:
                    tmp.append(
                        self.model(
                            self.test_transform(img).to(
                                self.device).unsqueeze(0)))
            target_embs = torch.cat(tmp)

        diff = source_embs.unsqueeze(-1) - target_embs.transpose(
            1, 0).unsqueeze(0)
        dist = torch.sum(torch.pow(diff, 2), dim=1)
        minimum, min_idx = torch.min(dist, dim=1)
        min_idx[minimum > self.threshold] = -1  # if no match, set idx to -1
        return min_idx, minimum
Example #10
 def label_projection(self,
                      label_vec,
                      img_data,
                      site=-1,
                      left_to_right=True,
                      normalize_phi_rn=True):
     """"""
     if site < 0: site = img_data.rnb
     idx = site if left_to_right else site - 1
     sl_in_left_rn = bool(idx > self.__w.sl)
     sl_in_right_rn = bool(idx + 1 < self.__w.sl)
     scale = [1., 1., 1., 1.]
     if normalize_phi_rn:
         scale[0] = inv_phi_rn(
             img_data.phi_rn[idx - 1]) if sl_in_left_rn else (
                 1. / l2_norm(img_data.phi_rn[idx - 1]))
         scale[1] = (1. / l2_norm(img_data.phi[idx]))
         scale[2] = (1. / l2_norm(img_data.phi[idx + 1]))
         scale[3] = inv_phi_rn(
             img_data.phi_rn[idx + 2]) if sl_in_right_rn else (
                 1. / l2_norm(img_data.phi_rn[idx + 2]))
     if sl_in_left_rn:
         net = self.net["bt_project_sll"]
         scale[0].transpose()
         net.putTensor("LAB", label_vec)
         net.putTensor("PHIL", scale[0])
         net.putTensor("PHII", img_data.phi[idx] * scale[1])
         net.putTensor("PHIJ", img_data.phi[idx + 1] * scale[2])
         net.putTensor("PHIR", img_data.phi_rn[idx + 2] * scale[3])
     elif sl_in_right_rn:
         net = self.net["bt_project_slr"]
         scale[3].transpose()
         net.putTensor("LAB", label_vec)
         net.putTensor("PHIL", img_data.phi_rn[idx - 1] * scale[0])
         net.putTensor("PHII", img_data.phi[idx] * scale[1])
         net.putTensor("PHIJ", img_data.phi[idx + 1] * scale[2])
         net.putTensor("PHIR", scale[3])
     else:
         net = self.net["label_projection"]
         net.putTensor("LAB", label_vec)
         net.putTensor("PHIL", img_data.phi_rn[idx - 1] * scale[0])
         net.putTensor("PHII", img_data.phi[idx] * scale[1])
         net.putTensor("PHIJ", img_data.phi[idx + 1] * scale[2])
         net.putTensor("PHIR", img_data.phi_rn[idx + 2] * scale[3])
     proj = net.launch()
     return proj
Example #11
 def __kernel_matrix(learner_seq, expert_seq, kernel_bandwidth):
     """
     Construct kernel matrix based on learn sequence and expert sequence, each entry of the matrix 
     is the distance between two data points in learner_seq or expert_seq. return two matrix, left_mat 
     is the distances between learn sequence and learn sequence, right_mat is the distances between 
     learn sequence and expert sequence.
     """
     # calculate l2 distances
     learner_learner_mat = utils.l2_norm(
         learner_seq,
         learner_seq)  # [batch_size*seq_len, batch_size*seq_len]
     expert_learner_mat = utils.l2_norm(
         expert_seq,
         learner_seq)  # [batch_size*seq_len, batch_size*seq_len]
     # exponential kernel
     learner_learner_mat = tf.exp(-learner_learner_mat / kernel_bandwidth)
     expert_learner_mat = tf.exp(-expert_learner_mat / kernel_bandwidth)
     return learner_learner_mat, expert_learner_mat
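Here utils.l2_norm takes two sequences and returns a matrix of pairwise distances, unlike the vector-normalization variants elsewhere on this page. A plausible reconstruction under that assumption:

import tensorflow as tf

def l2_norm(x, y):
    # pairwise squared Euclidean distances between rows of x [n, d] and y [m, d]
    x_sq = tf.reduce_sum(tf.square(x), axis=1, keepdims=True)  # [n, 1]
    y_sq = tf.reduce_sum(tf.square(y), axis=1, keepdims=True)  # [m, 1]
    return x_sq - 2.0 * tf.matmul(x, y, transpose_b=True) + tf.transpose(y_sq)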
Example #12
    def forward_bpr(self, inp, weights, name="bpr_model"):
        with tf.name_scope(name=name):
            u, i, j = inp[:, 0], inp[:, 1], inp[:, 2]
            U = weights['users']
            V = weights['items']
            u_batch = tf.nn.embedding_lookup(U, u)
            i_batch = tf.nn.embedding_lookup(V, i)
            j_batch = tf.nn.embedding_lookup(V, j)
            # print(u_batch.shape)
            ui = tf.reduce_sum(tf.multiply(u_batch, i_batch), axis=1)
            uj = tf.reduce_sum(tf.multiply(u_batch, j_batch), axis=1)
            # print(uj)

            uij = ui - uj
            # print(uij.shape)

            reg = FLAGS.lamb_u * l2_norm(u_batch) + FLAGS.lamb_v * (
                l2_norm(i_batch) + l2_norm(j_batch))

            return uij, reg
Example #13
 def _optimize_actor(self, batch):
     pi = self.model.actor(batch.state)
     new_actions = pi.rsample()
     reg = utils.l2_norm(self.model.actor.named_parameters())
     value = (1. - batch.done) * self.model.critic(batch.state,
                                                   new_actions).squeeze(-1)
     return (-value).mean(), {
         "actor/value": value.mean().detach(),
         "actor/loc": pi.mean.mean(),
         "actor/reg": reg.mean(),
         "actor/scale": pi.variance.mean()
     }
Example #14
def get_embeds(img):
    start_time = time.time()
    # print("[*] Encode {} to ./output_embeds.npy".format(FLAGS.img_path))
    #img = cv2.imread(FLAGS.img_path)
    img = cv2.resize(img, (cfg['input_size'], cfg['input_size']))
    img = img.astype(np.float32) / 255.
    if len(img.shape) == 3:
        img = np.expand_dims(img, 0)
    embeds = l2_norm(model(img))
    #np.save('./output_embeds.npy', embeds)
    print("time taken: ", end=" ")
    print((time.time() - start_time))
    return embeds
Example #15
def evaluate(model, classifier, criterion, testloader, use_gpu, num_classes, epoch, args):
    model.eval()
    correct, total = 0, 0
    if args.plot:
        all_features, all_labels = [], []

    with torch.no_grad():
        for data, labels in testloader:
            if use_gpu:
                data, labels = data.cuda(), labels.cuda()
            features = model(data)
            _, cosine = classifier(features, labels)
            predictions = cosine.data.max(1)[1]
            total += labels.size(0)
            correct += (predictions == labels.data).sum()

            if args.plot_normalized:
                features = l2_norm(features)

            if args.plot:
                if use_gpu:
                    all_features.append(features.data.cpu().numpy())
                    all_labels.append(labels.data.cpu().numpy())
                else:
                    all_features.append(features.data.numpy())
                    all_labels.append(labels.data.numpy())

    if args.plot:
        all_features = np.concatenate(all_features, 0)
        all_labels = np.concatenate(all_labels, 0)
        weights = None
        centers = classifier.weight.data
        if args.plot_normalized:
            centers = l2_norm(classifier.weight.data)
        plot_features(all_features, weights, centers.cpu().numpy(), all_labels, num_classes, epoch, prefix='test', args=args)

    acc = correct * 100. / total
    err = 100. - acc
    return acc, err
Example #16
def mp_ef_projection(args):
    """"""
    sample_start, sample_end, site, sl_on_left, normalize_phi_rn = args
    idx = site if sl_on_left else site - 1
    netd = shared.NET["decision_fn_left"] if sl_on_left else shared.NET[
        "decision_fn_right"]
    netp = shared.NET["label_projection"]
    dB = UniTensor()

    for s in xrange(sample_start, sample_end):
        netd.putTensor("WI", shared.W[idx])
        netd.putTensor("WJ", shared.W[idx + 1])
        netd.putTensor("PHIL", shared.PHI_RN[s][idx - 1])
        netd.putTensor("PHII", shared.PHI[s][idx])
        netd.putTensor("PHIJ", shared.PHI[s][idx + 1])
        netd.putTensor("PHIR", shared.PHI_RN[s][idx + 2])
        df = netd.launch()
        # df.permute(1) # Mysteriously, Pool.map sometimes messes up the final permute in launch!
        ef = shared.TL[s] + (-1.) * df

        scale = [1., 1., 1., 1.]
        if normalize_phi_rn:
            scale[0] = (1. / l2_norm(shared.PHI_RN[s][idx - 1]))
            scale[1] = (1. / l2_norm(shared.PHI[s][idx]))
            scale[2] = (1. / l2_norm(shared.PHI[s][idx + 1]))
            scale[3] = (1. / l2_norm(shared.PHI_RN[s][idx + 2]))
        netp.putTensor("LAB", ef)
        netp.putTensor("PHIL", shared.PHI_RN[s][idx - 1] * scale[0])
        netp.putTensor("PHII", shared.PHI[s][idx] * scale[1])
        netp.putTensor("PHIJ", shared.PHI[s][idx + 1] * scale[2])
        netp.putTensor("PHIR", shared.PHI_RN[s][idx + 2] * scale[3])
        proj = netp.launch()
        try:
            dB += proj
        except:  # first sample: dB is still an empty UniTensor
            dB = proj

    return exportElem(dB)
Example #17
 def label_projection(self,
                      label_vec,
                      img_data,
                      site=-1,
                      sl_on_left=True,
                      normalize_phi_rn=True):
     """"""
     if site < 0: site = self.__w.sl
     idx = site if sl_on_left else site - 1
     scale = [1., 1., 1., 1.]
     if normalize_phi_rn:
         scale[0] = (1. / l2_norm(img_data.phi_rn[idx - 1]))
         scale[1] = (1. / l2_norm(img_data.phi[idx]))
         scale[2] = (1. / l2_norm(img_data.phi[idx + 1]))
         scale[3] = (1. / l2_norm(img_data.phi_rn[idx + 2]))
     net = self.net["label_projection"]
     net.putTensor("LAB", label_vec)
     net.putTensor("PHIL", img_data.phi_rn[idx - 1] * scale[0])
     net.putTensor("PHII", img_data.phi[idx] * scale[1])
     net.putTensor("PHIJ", img_data.phi[idx + 1] * scale[2])
     net.putTensor("PHIR", img_data.phi_rn[idx + 2] * scale[3])
     proj = net.launch()
     return proj
Example #18
    def struct_embeddings(self, config, vocab):
        emb_list = []
        with tf.device("/cpu:0"):
            l1_norm = tf.zeros([])
            l2_norm = tf.zeros([])
        self.struct_embeddings = []
        for i, (feat, dims) in enumerate(config.mimic_embeddings.items()):
            if dims <= 0: continue
            try:
                vocab_aux = len(vocab.aux_list[feat])
            except KeyError:
                vocab_aux = 2 # binary
            with tf.device("/cpu:0"):
                vocab_dims = vocab_aux
                if feat in config.var_len_features:
                    vocab_dims -= 1
                embedding = tf.get_variable("struct_embedding."+feat, [vocab_dims,
                                                                    config.mimic_embeddings[feat]],
                                            initializer=tf.random_uniform_initializer(-1.0, 1.0))
                self.struct_embeddings.append(embedding)
                l1_norm += utils.l1_norm(embedding)
                l2_norm += utils.l2_norm(embedding)
                if feat in config.var_len_features:
                    embedding = tf.concat(0, [tf.zeros([1, config.mimic_embeddings[feat]]),
                                              embedding], name='struct_concat.'+feat)
                val_embedding = tf.nn.embedding_lookup(embedding, self.aux_data[feat],
                                                       name='struct_embedding_lookup.'+feat)
                if config.inspect == 'struct':
                    val_embedding *= self.struct_enable[feat]
                if feat in config.var_len_features:
                    if config.training and config.struct_keep_prob < 1:
                        # drop random structured info items entirely
                        val_embedding = tf.nn.dropout(val_embedding, config.struct_keep_prob,
                                                      noise_shape=tf.pack([config.batch_size,
                                                                   tf.shape(val_embedding)[1], 1]),
                                                      name='struct_dropout_varlen.'+feat)
                    reduced = tf.reduce_sum(val_embedding, 1,
                                            name='sum_struct_val_embeddings.'+feat)
                    if config.mean_varlen_embs:
                        reduced /= tf.reshape(tf.maximum(self.aux_data_len[feat], 1),
                                              [config.batch_size, 1], name='struct_mean_reshape')
                else:
                    reduced = tf.squeeze(val_embedding, [1])
                    if config.training and config.struct_keep_prob < 1:
                        reduced = tf.nn.dropout(reduced, config.struct_keep_prob,
                                                noise_shape=[config.batch_size, 1, 1],
                                                name='struct_dropout_fixlen.'+feat)
            emb_list.append(reduced)

        return tf.concat(1, emb_list), l1_norm, l2_norm
Example #19
 def forward(self, embeddings, label):
     kernel_norm = l2_norm(self.kernel, axis=0)
     cos_theta = torch.mm(embeddings, kernel_norm)
     cos_theta = cos_theta.clamp(-1, 1)  # for numerical stability
     phi = cos_theta - self.m
     label = label.view(-1, 1)  #size=(B,1)
     index = cos_theta.data * 0.0  #size=(B,Classnum)
     index.scatter_(1, label.data.view(-1, 1), 1)
     index = index.byte()
     index = Variable(index)
     output = cos_theta * 1.0
     output[index] = phi[index]  #only change the correct predicted output
     output *= self.s  # scale up in order to make softmax work, first introduced in normface
     return output
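A self-contained functional restatement of the margin logic above; the defaults m=0.35 and s=30.0 are illustrative assumptions, not values taken from the snippet:

import torch

def am_softmax_logits(embeddings, kernel, label, m=0.35, s=30.0):
    # kernel: [emb_dim, num_classes]; normalize its columns, as l2_norm(kernel, axis=0) does
    kernel_norm = kernel / kernel.norm(p=2, dim=0, keepdim=True)
    cos_theta = (embeddings @ kernel_norm).clamp(-1, 1)
    one_hot = torch.zeros_like(cos_theta).scatter_(1, label.view(-1, 1), 1.0)
    # subtract the margin m only at the ground-truth class, then scale by s
    return s * torch.where(one_hot.bool(), cos_theta - m, cos_theta)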
Example #20
    def forward(self, x):
        x = self.backbone.layer0(x)
        x = self.backbone.layer1(x)
        x = self.backbone.layer2(x)
        x = self.backbone.layer3(x)
        x = self.backbone.layer4(x)
        #         print(x.size())
        x = self.backbone.avg_pool(x)
        #         print(x.size())
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        #         x = self.backbone.last_linear(x)

        x = l2_norm(x)
        return x
Example #21
def build_model(options, params):
    # inputs to the model
    users_id = T.ivector('users_id')
    items_id = T.ivector('items_id')
    y        = T.fvector('y')

    # predictions
    y_hat = get_predictons(options, params, users_id, items_id)
    # cost
    mse = T.mean(T.sqr(y - y_hat))
    cost = mse
    if 'l2_coeff' in options and options['l2_coeff'] > 0.:
        cost += options['l2_coeff'] * sum([l2_norm(p) for p in params.values()])

    return users_id, items_id, y, y_hat, mse, cost
Example #22
    def forward(self, x):
        x = self.backbone.conv1(x)
        x = self.backbone.bn1(x)
        x = self.backbone.relu(x)
        x = self.backbone.maxpool(x)

        x = self.backbone.layer1(x)
        x = self.backbone.layer2(x)
        x = self.backbone.layer3(x)
        x = self.backbone.layer4(x)

        x = self.backbone.avgpool(x)
        x = self.conv_last(x)
        x = x.view(x.size(0), -1)
        x = l2_norm(x)

        return x
Example #23
    def word_distance(self, word1, word2):
        ''' 
        Return the L2 distance between `word1` and `word2`.

        Parameters
        ----------------
        word1: str

        word2: str

        Returns
        ----------
        float
        '''
        index1 = self.word2index[word1]
        index2 = self.word2index[word2]
        return l2_norm(self.index2embedding[index1] -
                       self.index2embedding[index2])
Example #24
def extract(skel):
    """Extract. timestep x 2 x num_joints (25) x 3"""
    timestep = skel.shape[0]
    keep_joints = [1, 4, 6, 8, 10, 12, 14, 16, 18, 20, 21]
    skel = skel[:, :, [keep_joint - 1 for keep_joint in keep_joints]]
    skel = skel.reshape(timestep, -1, 3)  # timestep x 22 x 3
    num_joints = len(keep_joints)

    jjd, jjv = [], []
    for t in range(timestep):
        jjd_t, jjv_t = [], []
        for i in range(num_joints):
            for j in range(i, num_joints, 1):
                # joint-joint distance
                jjd_t.append(utils.l2_norm(skel[t, i], skel[t, j]))

                # joint-joint vector
                jjv_t.append(skel[t, i] - skel[t, j])
        jjd.append(jjd_t)
        jjv.append(jjv_t)
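Note that utils.l2_norm is called here with two points and is expected to return their scalar distance; a sketch under that assumption:

import numpy as np

def l2_norm(p, q):
    # Euclidean distance between two 3-D joint positions
    return np.linalg.norm(p - q)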
Example #25
 def _optimize_critic(self, batch):
     with torch.no_grad():
         v_t = self.model.baseline(batch.next_state).squeeze()
     v_tm1 = self.model.baseline(batch.state).squeeze()
     q_tm1 = self.model.critic(batch.state, batch.action).squeeze()
     discount_t = (1 - batch.done) * self.gamma
     r_t = batch.reward
     vtrace_target = rlego.vtrace_td_error_and_advantage(
         v_tm1.detach(),
         v_t,
         r_t,
         discount_t,
         rho_tm1=torch.ones_like(discount_t))
     q_loss = (vtrace_target.q_estimate -
               q_tm1) * (1 - config.gamma) + utils.l2_norm(
                   self.model.critic.named_parameters())
     td_loss = (vtrace_target.target_tm1 - v_tm1) * (1 - config.gamma)
     loss = 0.5 * q_loss.pow(2) + 0.5 * td_loss.pow(2)
     return loss.mean(), {
         "critic/td": td_loss.mean().detach(),
         "critic/q_loss": q_loss.mean().detach()
     }
Example #26
def build_model(options, params):
    # inputs to the model
    users_id   = T.ivector('users_id')
    items_id   = T.ivector('items_id')
    bow        = T.fmatrix('bow')
    y          = T.fvector('y')

    alpha = options['alpha']

    # predictions
    y_pred, bow_pred = get_predictons(options, params, users_id, items_id)
    # LF model cost
    mse = T.mean(T.sqr(y - y_pred))

    # BOW negative-log-likelihood cost
    nll = T.mean(T.nnet.categorical_crossentropy(bow_pred, bow))

    cost = alpha * mse + (1-alpha) * nll

    if 'l2_coeff' in options and options['l2_coeff'] > 0.:
        cost += options['l2_coeff'] * sum([l2_norm(p) for p in params.values()])

    return users_id, items_id, bow, y, y_pred, bow_pred, mse, nll, cost
Example #27
    def decompose(self):
        """
        Starting initially with Gram-Schmidt.
        """
        self.Q = np.array(self.A)
        self.R = np.zeros_like(self.A)

        M, N = self.Q.shape

        # for each column
        for i in range(N):
            # for each column on the left of current
            for j in range(i):
                # calculate projection of ith col on jth col
                p = proj(self.Q[:, i], self.Q[:, j])
                self.Q[:, i] -= p
            # normalize the ith column
            self.Q[:, i] /= l2_norm(self.Q[:, i])

        # compute R
        self.R = np.dot(self.Q.T, self.A)

        return self.Q, self.R
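A quick sanity check of the Gram-Schmidt routine above, with the l2_norm and proj helpers it relies on written out; both helper bodies are assumptions consistent with how they are called:

import numpy as np

def l2_norm(v):
    return np.sqrt(np.sum(v ** 2))

def proj(u, v):
    # projection of u onto the direction of v
    return (np.dot(u, v) / np.dot(v, v)) * v

A = np.array([[1.0, 1.0], [0.0, 1.0], [1.0, 0.0]])
Q = np.array(A)
M, N = Q.shape
for i in range(N):
    for j in range(i):
        Q[:, i] -= proj(Q[:, i], Q[:, j])
    Q[:, i] /= l2_norm(Q[:, i])
R = np.dot(Q.T, A)
assert np.allclose(Q @ R, A)             # QR reproduces A
assert np.allclose(Q.T @ Q, np.eye(N))   # columns of Q are orthonormal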
Example #28

device = torch.device('cuda')
model_pool = get_model_pool(device)
print('----models load over----')
faces = os.listdir('securityAI_round1_images')
faces.sort(key=lambda x: int(x[:-4]))

vectors_list = []
for model in model_pool:
    vectors = []
    for face in faces:
        face = utils.to_torch_tensor(
            Image.open('securityAI_round1_images/' + face))
        face = face.unsqueeze_(0).to(device)
        vectors.append(utils.l2_norm(model(face)).detach_())
    vectors_list.append(vectors)
print('----vectors calculate over----')

confusion_matrixes = []
for vectors in vectors_list:
    s = torch.FloatTensor(len(vectors), len(vectors))
    for i, vector1 in enumerate(vectors):
        for j, vector2 in enumerate(vectors[i + 1:]):
            tmp = (vector1 * vector2).sum().item()
            # print(i,j + i + 1,tmp)
            s[i, j + i + 1] = tmp
            s[j + i + 1, i] = tmp
    for i in range(712):
        s[i, i] = 0
    print(s)
Example #29
subjects = id_list
label_int = source_id
embed_list = []
label_list = []
for subject in tqdm(subjects):
    template_paths = glob(dataset_path + subject + "/*")[0]
    img_paths = glob(template_paths + "/*")
    for img_path in img_paths:
        img = cv2.imread(img_path)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = cv2.resize(img, (112, 112))
        img = img.astype(np.float32) / 255.
        if len(img.shape) == 3:
            img = np.expand_dims(img, 0)

        embeds = l2_norm(model(img, training=False))
        embed_list.append(embeds[0].numpy())
        label_list.append(label_int[subjects.index(subject)])
embed_list = np.asarray(embed_list)
label_list = np.asarray(label_list)

save_path = "/raid/workspace/jbpark/IJB-C/numpy/"
if projection_head:
    save_name = f'ms1m_{backbone_type}_{head_type}_ProjectionHead/'
else:
    save_name = f'ms1m_{backbone_type}_{head_type}/'
Path(f'{save_path}{save_name}').mkdir(parents=True, exist_ok=True)
np.save(f'{save_path}{save_name}ijbc_gallery_vectors.npy', embed_list)
np.save(f'{save_path}{save_name}ijbc_gallery_labels.npy', label_list)

dataset_path = "/raid/workspace/jbpark/IJB-C/Probes/"
Example #30
    def struct_embeddings(self, config, vocab):
        emb_list = []
        with tf.device("/cpu:0"):
            l1_norm = tf.zeros([])
            l2_norm = tf.zeros([])
        self.struct_embeddings = []
        for i, (feat, dims) in enumerate(config.mimic_embeddings.items()):
            if dims <= 0: continue
            try:
                vocab_aux = len(vocab.aux_list[feat])
            except KeyError:
                vocab_aux = 2  # binary
            with tf.device("/cpu:0"):
                vocab_dims = vocab_aux
                if feat in config.var_len_features:
                    vocab_dims -= 1
                embedding = tf.get_variable(
                    "struct_embedding." + feat,
                    [vocab_dims, config.mimic_embeddings[feat]],
                    initializer=tf.random_uniform_initializer(-1.0, 1.0))
                self.struct_embeddings.append(embedding)
                l1_norm += utils.l1_norm(embedding)
                l2_norm += utils.l2_norm(embedding)
                if feat in config.var_len_features:
                    embedding = tf.concat(0, [
                        tf.zeros([1, config.mimic_embeddings[feat]]), embedding
                    ],
                                          name='struct_concat.' + feat)
                val_embedding = tf.nn.embedding_lookup(
                    embedding,
                    self.aux_data[feat],
                    name='struct_embedding_lookup.' + feat)
                if config.inspect == 'struct':
                    val_embedding *= self.struct_enable[feat]
                if feat in config.var_len_features:
                    if config.training and config.struct_keep_prob < 1:
                        # drop random structured info items entirely
                        val_embedding = tf.nn.dropout(
                            val_embedding,
                            config.struct_keep_prob,
                            noise_shape=tf.pack([
                                config.batch_size,
                                tf.shape(val_embedding)[1], 1
                            ]),
                            name='struct_dropout_varlen.' + feat)
                    reduced = tf.reduce_sum(val_embedding,
                                            1,
                                            name='sum_struct_val_embeddings.' +
                                            feat)
                    if config.mean_varlen_embs:
                        reduced /= tf.reshape(tf.maximum(
                            self.aux_data_len[feat], 1),
                                              [config.batch_size, 1],
                                              name='struct_mean_reshape')
                else:
                    reduced = tf.squeeze(val_embedding, [1])
                    if config.training and config.struct_keep_prob < 1:
                        reduced = tf.nn.dropout(
                            reduced,
                            config.struct_keep_prob,
                            noise_shape=[config.batch_size, 1, 1],
                            name='struct_dropout_fixlen.' + feat)
            emb_list.append(reduced)

        return tf.concat(1, emb_list), l1_norm, l2_norm