def __init__(self, noise_var, hparam):
    # NOTE(review): noise_var is accepted but not used in the visible part of
    # this initializer -- presumably consumed further down; confirm against
    # the full class body.
    # get the constellation (the discrete alphabet each tx symbol is drawn from)
    self.constellation = hparam.constellation
    self.hparam = hparam
    # set the graph: a fresh factor graph to hold the symbol variables
    self.graph = fg.Graph()
    # add the discrete random variables to graph: one RV named "x{idx}" per
    # transmit antenna, each with len(constellation) possible states
    self.n_symbol = hparam.num_tx
    for idx in range(hparam.num_tx):
        self.graph.rv("x{}".format(idx), len(self.constellation))
def _build_xgraph(graphs, tuples, pot):
    """
    Makes the interconnected (across knowledge dimension) graph.

    For every requested attribute pair, frame RVs that exist under the same
    name in both attribute graphs are linked with an 'xfactor' sharing the
    potential `pot`.

    Args:
        graphs ([AttrGraph])
        tuples ([(str, str)]) Attr pairs to add cxns between frame RVs
        pot (np.ndarray of shape 3x3)

    Returns
        fg.Graph: the xgraph
    """
    xgraph = fg.Graph(debug=False)
    base_logger.debug('Adding xgraph xfactors...')
    total = 0
    for attr1, attr2 in tuples:
        g1 = [g for g in graphs if g.name == attr1]
        g2 = [g for g in graphs if g.name == attr2]
        # Might not have one or both of the graphs because of the current
        # settings.
        if len(g1) != 1 or len(g2) != 1:
            base_logger.debug(
                '\t skipping links between missing graphs %s and %s',
                attr1, attr2)
            continue
        g1 = g1[0]
        g2 = g2[0]
        # Find RVs that match across both graphs. Pruned RVs won't be returned
        # by get_rvs() (as they are actually deleted from the graph's underlying
        # dict), but we do want to make sure we're only linking frames.
        # Fix: .items() instead of the Python-2-only .iteritems(); .items()
        # works on both Python 2 and 3.
        matches = []
        for rv_name, rv1 in g1.graph.get_rvs().items():
            if rv1.meta['type'] != 'frame':
                continue
            if g2.graph.has_rv(rv_name):
                rv2 = g2.graph.get_rvs()[rv_name]
                matches.append([rv1, rv2])
        # add factors to our linking graph
        for match in matches:
            xgraph.factor(match, 'xfactor', pot, {'type': 'xfactor'})
        # reporting
        base_logger.debug(
            '\t added %d links between frame RVs between %s and %s',
            len(matches), attr1, attr2)
        total += len(matches)
    # Fix: lazy logger args instead of eager %-formatting, consistent with
    # every other debug call in this function.
    base_logger.debug('Added %d xgraph xfactors in total', total)
    return xgraph
    # NOTE(review): this is the tail of a pairwise-potential helper whose def
    # line is outside this view; dvx*/dvy*, dl*, ddisp*, score*, alpha1-4, Wb
    # and theta are its parameters/locals -- confirm against the full file.
    # Pairwise cost when the diagonal entries of the table apply: squared
    # velocity difference + absolute location difference + absolute
    # displacement difference between detections i and j.
    Pb_11 = alpha1 * ((dvxi - dvxj) ** 2 + (dvyi - dvyj) ** 2) \
        + alpha2 * (abs(dlxi - dlxj) + abs(dlyi - dlyj)) \
        + alpha4 * abs(ddispi - ddispj)
    # Off-diagonal cost is alpha3 only when both detections score above
    # theta; otherwise it is free.
    if (scorei > theta and scorej > theta):
        Pb_not_11 = alpha3
    else:
        Pb_not_11 = 0
    # 2x2 potential table over the two binary labels: diagonal entries use
    # Pb_11, off-diagonal entries use Pb_not_11, each mapped through
    # exp(-Wb * cost).
    return np.array([[exp(-Wb * Pb_11), exp(-Wb * Pb_not_11)],
                     [exp(-Wb * Pb_not_11), exp(-Wb * Pb_11)]])


# Build one factor graph per frame in the half-open range [frame[0], frame[1]).
for test_frame_id in range(frame[0], frame[1]):
    frame_info = all_info[test_frame_id]
    g = fg.Graph()
    # First element of each detection record is its track id.
    node_id_list = [l[0] for l in frame_info]
    for i in range(len(node_id_list)):
        node_i = str(
            node_id_list[i]
        )  # node_i is the track_id (turned into str) for the variable node i
        g.rv(node_i, 2)  # yi can be 0 or 1
    for i in range(len(node_id_list)):
        node_i = str(node_id_list[i])
        # first, add the unary factors
        # frame_info[i] -> [id,dvx,dvy,dl,score,is_fake]
        g.factor([node_i], potential=calc_unary(frame_info[i], Wu=0.98))
        # second, add the binary factors
        # NOTE(review): loop body continues past this view.
# User-item rating matrix # movies_dict = build_movies_dict(movies_data) # R = generate_matrix_from_csv(ratings_data, movies_dict) R_clean = generate_100k_matrix(ratings_data) R = generate_dirty_matrix(dirty_ratings_data) user_ground_truth = generate_user_spam_list(spam_users_file) # Data Statistics num_users = np.shape(R)[0] num_items = np.shape(R)[1] print('Initializing...\n') # Initialize Factor Graph Graph = fg.Graph() # Create nodes : node_list = ['m1', 'm2', 'm3', 't1', 't2', 't3'] user_nodes = [] for i in range(num_users): user_nodes.append('m' + str(i)) item_nodes = [] for i in range(num_items): item_nodes.append('t' + str(i)) # Spam Users and Target Items Initializations m = np.random.rand(num_users) m = [0 if i > 0.5 else 1 for i in np.random.rand(num_users)] t = np.random.rand(num_items)