Example #1
0
def prepare_clauses(clauses, anno, converter, objs):
    """Select the clauses relevant to one annotation set and add assumptions.

    :param clauses: full clause list to filter.
    :param anno: annotation records, each with 'subject'/'object' dicts
        carrying a 'category' and a 'bbox'.
    :param converter: name/number converter exposing ``name2num``.
    :param objs: category-id -> object-name mapping.
    :return: (clause list including assumptions, RelevantFormulaContainer
        wrapping that same list).
    """
    # Observed object categories in first-seen order: for each annotation,
    # the object's category is recorded before the subject's.
    seen_categories = []
    for record in anno:
        for category in (record['object']['category'],
                         record['subject']['category']):
            if category not in seen_categories:
                seen_categories.append(category)

    # Filter down to clauses touching any unordered pair of seen categories
    # (pairs include a category with itself).
    category_pairs = [
        list(pair)
        for pair in combinations_with_replacement(seen_categories, 2)
    ]
    r_clauses = relevant_clauses(clauses, category_pairs, converter)

    # One unit-clause assumption per annotation: the spatial relation name
    # derived from the two boxes, applied to the subject/object names.
    assumptions = [
        [converter.name2num((box_prop_name(record['subject']['bbox'],
                                           record['object']['bbox']),
                             objs[record['subject']['category']],
                             objs[record['object']['category']]))]
        for record in anno
    ]

    r_clauses = r_clauses + assumptions
    return r_clauses, RelevantFormulaContainer(r_clauses)
Example #2
0
def prediction_to_assignment_embedding(softmax_list, info_list, embedder,
                                       tokenizers, pres, objs):
    """
    Build per-pair clause embeddings from predicate predictions and spatial
    relations, then encode the whole assignment with a graph embedder.

    :param softmax_list: a torch.Tensor of predicate softmax scores, one row
        per entry of info_list (assumed to have >= 70 columns, one per
        predicate class — TODO confirm against the model)
    :param info_list: [(img_name, (label1, bbox1), (label2, bbox2))]
    :param embedder: module applied as ``embedder(features, adj, labels)``
    :param tokenizers: dict holding a 'vocab2token' word -> token-id mapping
    :param pres: index -> predicate-name mapping (indices 0..69 are used here)
    :param objs: category-id -> object-name mapping
    :return: the embedding of assignment
    """
    def _feature(name):
        # Sum the word vectors of the space-separated words in `name`;
        # relies on the module-level `word_vectors` table (not visible here).
        embedding = np.array([
            word_vectors[tokenizers['vocab2token'][i]] for i in name.split(' ')
        ])
        summed_embedding = np.sum(embedding, axis=0)
        return summed_embedding

    embedded_clauses = []
    # Pass 1: soft predicate clause per pair — predicate features averaged
    # with softmax weights, then averaged with subject/object features.
    for idx, info in enumerate(info_list):
        prop = softmax_list[idx]
        sub = objs[info_list[idx][1][0]]
        obj = objs[info_list[idx][2][0]]

        e_p = 0
        # 70 is the hard-coded number of predicate classes scored here.
        for pres_idx in range(0, 70):
            e_p += prop[pres_idx] * torch.cuda.FloatTensor(
                _feature(pres[pres_idx]))
        e_p = e_p / 70
        # Mean of predicate, subject and object embeddings.
        e_p = (e_p + torch.cuda.FloatTensor(_feature(sub)) +
               torch.cuda.FloatTensor(_feature(obj))) / 3

        embedded_clauses.append(e_p)

    # Pass 2: one hard spatial-relation clause per pair, derived from the
    # two bounding boxes (expanded to its full name when a mapping exists).
    for idx, info in enumerate(info_list):
        pos = box_prop_name(info_list[idx][1][1], info_list[idx][2][1])
        if pos in POS_REL_NAMES_FULL.keys():
            pos = POS_REL_NAMES_FULL[pos]
        sub = objs[info_list[idx][1][0]]
        obj = objs[info_list[idx][2][0]]
        embedded_clauses.append(
            (torch.cuda.FloatTensor(_feature(pos)) + torch.cuda.FloatTensor(
                _feature(sub)) + torch.cuda.FloatTensor(_feature(obj))) / 3)
    # Convert the clause embeddings to GCN inputs; `node_features` is a
    # module-level table (not visible here).
    adj0, features0, labels0 = assignment_to_gcn_compatible(
        embedded_clauses, node_features)

    embedded_clauses = embedder(features0.cuda().squeeze(0),
                                adj0.cuda().squeeze(0), labels0.cuda())

    return embedded_clauses
Example #3
0
def tree_prediction_to_assignment_embedding(softmax_list, info_list, embedder,
                                            tokenizers, pres, objs):
    """
    Tree-embedder variant of ``prediction_to_assignment_embedding``: builds
    the same per-pair clause embeddings, but converts them to a tree
    structure and applies ``embedder(tree, features)``.

    :param softmax_list: a torch.Tensor of predicate softmax scores, one row
        per entry of info_list (assumed to have >= 70 columns — TODO confirm)
    :param info_list: [(img_name, (label1, bbox1), (label2, bbox2))]
    :param embedder: module applied as ``embedder(tree, features)``
    :param tokenizers: dict holding a 'vocab2token' word -> token-id mapping
    :param pres: index -> predicate-name mapping (indices 0..69 are used here)
    :param objs: category-id -> object-name mapping
    :return: the embedding of the assignment
    """
    def _feature(name):
        # Sum the word vectors of the space-separated words in `name`;
        # relies on the module-level `word_vectors` table (not visible here).
        embedding = np.array([
            word_vectors[tokenizers['vocab2token'][i]] for i in name.split(' ')
        ])
        summed_embedding = np.sum(embedding, axis=0)
        return summed_embedding

    embedded_clauses = []
    # Pass 1: soft predicate clause per pair — predicate features averaged
    # with softmax weights, then averaged with subject/object features.
    for idx, info in enumerate(info_list):
        prop = softmax_list[idx]
        sub = objs[info_list[idx][1][0]]
        obj = objs[info_list[idx][2][0]]

        e_p = 0
        # 70 is the hard-coded number of predicate classes scored here.
        for pres_idx in range(0, 70):
            e_p += prop[pres_idx] * torch.cuda.FloatTensor(
                _feature(pres[pres_idx]))
        e_p = e_p / 70
        # Mean of predicate, subject and object embeddings.
        e_p = (e_p + torch.cuda.FloatTensor(_feature(sub)) +
               torch.cuda.FloatTensor(_feature(obj))) / 3

        embedded_clauses.append(e_p)

    # Pass 2: one hard spatial-relation clause per pair, derived from the
    # two bounding boxes (expanded to its full name when a mapping exists).
    for idx, info in enumerate(info_list):
        pos = box_prop_name(info_list[idx][1][1], info_list[idx][2][1])
        if pos in POS_REL_NAMES_FULL.keys():
            pos = POS_REL_NAMES_FULL[pos]
        sub = objs[info_list[idx][1][0]]
        obj = objs[info_list[idx][2][0]]
        embedded_clauses.append(
            (torch.cuda.FloatTensor(_feature(pos)) + torch.cuda.FloatTensor(
                _feature(sub)) + torch.cuda.FloatTensor(_feature(obj))) / 3)
    # Convert the clause embeddings to tree inputs; `node_features` is a
    # module-level table (not visible here).
    tree0, features0, labels0 = assignment_to_tree_compatible(
        embedded_clauses, node_features)

    embedded_clauses = embedder(tree0, features0.cuda())

    return embedded_clauses
Example #4
0
# Predicate vocabulary: the spatial-relation names, followed by the two
# special markers used by the logical encoding.
pres.extend(POS_REL_NAMES)
pres.append('_exists')
pres.append('_unique')

# Maps (rel_id, subject_id, object_id) -> list of observed
# (pos_id, subject_id, object_id) spatial groundings for that relation.
rel_to_pos = {}
clauses = []
var_pool = pysat.formula.IDPool(start_from=1)
converter = Converter(var_pool, pres, objs)

for _, annos in annotation.items():
    for anno in annos:
        # Spatial relation of subject w.r.t. object, looked up in `pres`.
        pos_id = pres.index(
            box_prop_name(anno['subject']['bbox'], anno['object']['bbox']))
        # Reverse direction; currently unused in this chunk but kept so a
        # malformed reverse relation name still fails loudly here.
        pos_id_r = pres.index(
            box_prop_name(anno['object']['bbox'], anno['subject']['bbox']))
        rel_id = anno['predicate']
        subject_id = anno['subject']['category']
        object_id = anno['object']['category']

        # Record each distinct spatial grounding once per relation triple.
        positions = rel_to_pos.setdefault((rel_id, subject_id, object_id), [])
        grounding = (pos_id, subject_id, object_id)
        if grounding not in positions:
            positions.append(grounding)
for rel, positions in rel_to_pos.items():
    this_clause = [-var_pool.id(rel)]