def forward(self, x, bc, f):
    '''
    Refine x with n_iters conjugate gradient steps on the finite-difference
    system defined by fd_loss_kernel (right-hand side f, when given).
    x: size (batch_size x image_size x image_size)
    return: same size
    '''
    batch_size = x.size(0)
    x = x.unsqueeze(1)

    # initial residual r = f - A x, with A applied via the FD kernel
    r = - F.conv2d(x, self.fd_loss_kernel)
    if f is not None:
      r = f + r
    p = r.clone()  # initial search direction
    rTr = utils.dot_product(r, r)

    for i in range(self.n_iters):
      p_pad = utils.pad_boundary(p.squeeze(1), torch.zeros(1, 4)).unsqueeze(1)
      Ap = F.conv2d(p_pad, self.fd_loss_kernel)
      pAp = utils.dot_product(p, Ap)
      alpha = (rTr / pAp).view(batch_size, 1, 1, 1)

      # Update
      x = x + alpha * p_pad
      r_new = r - alpha * Ap

      rTr_new = utils.dot_product(r_new, r_new)
      beta = (rTr_new / rTr).view(batch_size, 1, 1, 1)
      p_new = r_new + beta * p

      p = p_new
      r = r_new
      rTr = rTr_new

    return x.squeeze(1)
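The loop above is the textbook conjugate gradient iteration, with the matrix-vector product realized as a convolution with the finite-difference kernel. A minimal dense NumPy sketch of the same updates, assuming a symmetric positive-definite A (names here are illustrative, not from the source):

import numpy as np

def conjugate_gradient(A, b, n_iters=50):
    x = np.zeros_like(b)
    r = b - A @ x              # residual, as r = f - conv(x) above
    p = r.copy()               # initial search direction
    rTr = r @ r
    for _ in range(n_iters):
        Ap = A @ p
        alpha = rTr / (p @ Ap)
        x = x + alpha * p      # step along the search direction
        r = r - alpha * Ap     # update the residual
        rTr_new = r @ r
        p = r + (rTr_new / rTr) * p   # new conjugate direction
        rTr = rTr_new
    return x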
Example #2
    def closest_point(self, f_x, f_y, s_x, s_y, obs_bearing):
        '''
        Calculate the closest point on the line to the feature.
        The feature is a point (probably not on the line); the line is
        defined by a point (state x and y) and a direction (heading).
        If the closest point would fall behind the robot (against the
        heading), the robot's own position is returned instead.
        Input:
            f_x float (feature's x coordinate)
            f_y float (feature's y coordinate)
            s_x float (robot state's x)
            s_y float (robot state's y)
            obs_bearing float (robot state's heading)
        '''
        origin_to_feature = (
            f_x - s_x,
            f_y - s_y,
            0.0,
        )
        line_parallel = unit((cos(obs_bearing), sin(obs_bearing), 0.0))

        # origin_to_feature dot line_parallel = magnitude of otf along line
        magmag = dot_product(origin_to_feature, line_parallel)

        if magmag < 0:
            return (s_x, s_y)

        scaled_line = scale(line_parallel, magmag)
        scaled_x = scaled_line[0]
        scaled_y = scaled_line[1]

        return (float(s_x + scaled_x), float(s_y + scaled_y))
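The vector helpers this method calls are not shown; a minimal sketch of what `dot_product`, `scale`, and `unit` plausibly look like for 3-tuples (an assumption based on the call sites):

from math import sqrt

def dot_product(a, b):
    # component-wise multiply and sum
    return sum(x * y for x, y in zip(a, b))

def scale(v, s):
    # multiply every component by the scalar s
    return tuple(x * s for x in v)

def unit(v):
    # normalize v to length 1
    mag = sqrt(dot_product(v, v))
    return tuple(x / mag for x in v)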
Example #3
def distance_q(source_type, target_type, embs, e_size):
	# if pmode == 'COS', use cosine similarity to evaluate the distance
	# if pmode == 'DOT', use the dot product to evaluate the distance
	# (pmode is expected to be defined at module level)

	target_pool = copy.copy(embs[target_type])

	while True:
		n_name = input("Enter your node: ")
		if n_name in embs[source_type]:
			print('looking for ' + n_name + '...')
			t_emb = embs[source_type][n_name]
			sim_map = {}
			for key in target_pool:
				if pmode == 'COS':
					sim_map[key] = utils.cossim(t_emb, target_pool[key])
				if pmode == 'DOT':
					sim_map[key] = utils.dot_product(t_emb, target_pool[key])
			sim_map = sorted(sim_map.items(), key=operator.itemgetter(1), reverse=True)
			print(sim_map[:10])
		else:
			print('name ' + n_name + ' is not found in ' + source_type)
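`utils.cossim` is likewise not shown; a plausible sketch, assuming embeddings are plain sequences of floats:

import math

def cossim(a, b):
    # cosine similarity: dot product normalized by both magnitudes
    dot = sum(x * y for x, y in zip(a, b))
    norm_a = math.sqrt(sum(x * x for x in a))
    norm_b = math.sqrt(sum(x * x for x in b))
    return dot / (norm_a * norm_b)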
Example #5
    def attack(self):

        r = self.r
        rm = self.public_key
        is_dual_code = self.m <= 2 * self.r
        
        if is_dual_code:
            r = self.m - 1 - r 
            rm = rm.orthogonal

        d, rm = gcd_step(rm, r, self.m)
        if d != 1:
            logger.info('performing Minder-Shokrollahi attack...')
            rm_minus_1 = MinderShokrollahi(d, self.m).attack(rm)
            rm = dot_product(rm.orthogonal, rm_minus_1).orthogonal

        else:
            logger.info('skipping Minder-Shokrollahi step...')

        logger.debug('solving P and M matrices...')
        P = find_permutation(rm, self.m)
        if is_dual_code:
            r = self.m - 1 - r

        permuted_rm = rm_code.generator(r, self.m) * P
        M = find_nonsingular(self.public_key, permuted_rm)
        
        return M, P
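`dot_product` here is applied to codes, not vectors; in Reed-Muller cryptanalysis this is usually the component-wise (Schur) product of codes, for which RM(r1, m) * RM(r2, m) = RM(r1 + r2, m). A hedged sketch of that operation on 0/1 generator matrices (an assumption about this library's semantics, not its actual implementation):

import numpy as np

def schur_product_generators(G1, G2):
    # all pairwise component-wise products of rows span the Schur
    # product code; reduce mod 2 to stay over GF(2)
    rows = [(g1 * g2) % 2 for g1 in np.asarray(G1) for g2 in np.asarray(G2)]
    return np.unique(np.array(rows), axis=0)  # deduplicate generators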
Example #6
def TM(filename, k, weights, testset=None):
    _weights = {'fwd':weights['fwd'],
                'bwd':weights['bwd'],
                'fwd_lex':weights['fwd_lex'],
                'bwd_lex':weights['bwd_lex']}
    # if a test set is provided, load all source n-grams in it
    NGRAM_FILTERING_THRESHOLD = 6
    testset_ngrams = set()
    if testset is not None:
        sys.stderr.write("phrase table will be filtered for test set {0}...".format(testset))
        for line in io.open(testset, encoding='utf8', mode='r'):
            tokens = line.strip().split()
            for phrase_len in range(1, NGRAM_FILTERING_THRESHOLD+1):
                # +1 so the n-gram ending at the last token is included
                for start_index in range(0, len(tokens)-phrase_len+1):
                    _phrase = ' '.join(tokens[start_index:start_index+phrase_len])
                    testset_ngrams.add(_phrase)
        sys.stderr.write("done. ngrams count = {0}\n".format(len(testset_ngrams)))
        
    sys.stderr.write("Reading translation model from %s...\n" % (filename,))
    tm = {}
    tm_size=0
    for line in io.open(filename, encoding='utf8'):
        (f, e, logprobs) = line.strip().split(" ||| ")
        f_tokens = f.strip().split()
        
        # filter out phrase pairs irrelevant to the test set (only when one was given)
        if testset is not None and \
           ' '.join(f_tokens[0:NGRAM_FILTERING_THRESHOLD]) not in testset_ngrams:
            continue
        
        if tm_size > 0 and tm_size % 100000 == 0:
            sys.stderr.write('tm_size={0}\n'.format(tm_size))

        logprobs = logprobs.strip().split()
        bwd = log(float(logprobs[0]))
        bwd_lex = log(float(logprobs[1]))
        fwd = log(float(logprobs[2]))
        fwd_lex = log(float(logprobs[3]))
        p = phrase(e, fwd, bwd, fwd_lex, bwd_lex)
        f_tuple = tuple(f_tokens)
        if f_tuple in tm:
            tm[f_tuple].append(p)
        else:
            tm[f_tuple] = [p]
        tm_size += 1

    for f in tm: # prune all but top k translations
        tm_size -= len(tm[f])
        # sort best-first (higher weighted log-prob score is better),
        # so that the deletion below keeps the top k
        tm[f].sort(key=lambda x: dot_product({'fwd':x.fwd, 
                                              'bwd':x.bwd, 
                                              'fwd_lex':x.fwd_lex, 
                                              'bwd_lex':x.bwd_lex}, _weights),
                   reverse=True)
        del tm[f][k:]
        tm_size += len(tm[f])
    sys.stderr.write('final tm_size={0}\n'.format(tm_size))
    return tm
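The parser expects Moses-style lines, source ||| target ||| four probabilities, read above as bwd, bwd_lex, fwd, fwd_lex. A hedged usage sketch with hypothetical file names:

# hypothetical usage; the file names and weights here are made up
weights = {'fwd': 1.0, 'bwd': 1.0, 'fwd_lex': 0.5, 'bwd_lex': 0.5}

# each line of the phrase table is expected to look like
#   le chien ||| the dog ||| 0.4 0.3 0.5 0.2
# with the four numbers read as bwd, bwd_lex, fwd, fwd_lex probabilities
tm = TM('phrase-table.txt', k=10, weights=weights, testset='test.src')
print(len(tm), 'source phrases kept')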
Example #7
def classify_doc_real(t_emb, target_embs, pmode):
	# if not hierarchical
	sim_map = {}
	for key in target_embs:
		if pmode == 'COS':
			sim_map[key] = utils.cossim(t_emb, target_embs[key])
		if pmode == 'DOT':
			sim_map[key] = utils.dot_product(t_emb, target_embs[key])
	sim_map = sorted(sim_map.items(), key=operator.itemgetter(1), reverse=True)
	return sim_map
Example #8
    def predict(example):
        o_nodes = learned_net[1]

        # forward pass
        for node in o_nodes:
            in_val = dot_product(example, node.weights)
            node.value = node.activation(in_val)

        # hypothesis
        return find_max_node(o_nodes)
Example #9
def LogisticLinearLeaner(dataset, learning_rate=0.01, epochs=100):
    """
    [Section 18.6.4]
    Linear classifier with logistic regression.
    """
    idx_i = dataset.inputs
    idx_t = dataset.target
    examples = dataset.examples
    num_examples = len(examples)

    # X transpose
    X_col = [dataset.values[i] for i in idx_i]  # vertical columns of X

    # add dummy
    ones = [1 for _ in range(len(examples))]
    X_col = [ones] + X_col

    # initialize random weights
    num_weights = len(idx_i) + 1
    w = random_weights(min_value=-0.5, max_value=0.5, num_weights=num_weights)

    for epoch in range(epochs):
        err = []
        h = []
        # pass over all examples
        for example in examples:
            x = [1] + example
            y = sigmoid(dot_product(w, x))
            h.append(sigmoid_derivative(y))
            t = example[idx_t]
            err.append(t - y)

        # update weights (the error-times-derivative buffer does not depend on i)
        buffer = [x * y for x, y in zip(err, h)]
        for i in range(len(w)):
            w[i] = w[i] + learning_rate * (dot_product(buffer, X_col[i]) /
                                           num_examples)

    def predict(example):
        x = [1] + example
        return sigmoid(dot_product(w, x))

    return predict
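This learner and the next lean on small helpers that are not shown; a plausible sketch of their definitions, inferred from the call sites (hedged, not the codebase's actual implementations):

import math
import random

def random_weights(min_value, max_value, num_weights):
    # uniform random initial weights in [min_value, max_value]
    return [random.uniform(min_value, max_value) for _ in range(num_weights)]

def sigmoid(x):
    return 1 / (1 + math.exp(-x))

def sigmoid_derivative(value):
    # takes the already-activated value: sigmoid(x) * (1 - sigmoid(x))
    return value * (1 - value)

def dot_product(x, y):
    return sum(a * b for a, b in zip(x, y))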
Example #10
def LinearLearner(dataset, learning_rate=0.01, epochs=100):
    """
    [Section 18.6.3]
    Linear classifier with hard threshold.
    """
    idx_i = dataset.inputs
    idx_t = dataset.target
    examples = dataset.examples
    num_examples = len(examples)

    # X transpose
    X_col = [dataset.values[i] for i in idx_i]  # vertical columns of X

    # add dummy
    ones = [1 for _ in range(len(examples))]
    X_col = [ones] + X_col

    # initialize random weights
    num_weights = len(idx_i) + 1
    w = random_weights(min_value=-0.5, max_value=0.5, num_weights=num_weights)

    for epoch in range(epochs):
        err = []
        # pass over all examples
        for example in examples:
            x = [1] + example
            y = dot_product(w, x)
            t = example[idx_t]
            err.append(t - y)

        # update weights
        for i in range(len(w)):
            w[i] = w[i] + learning_rate * (dot_product(err, X_col[i]) /
                                           num_examples)

    def predict(example):
        x = [1] + example
        return dot_product(w, x)

    return predict
Example #11
    def calculate_score(self, doc_ids_to_scores):
        """Returns a list of (score, doc_id).

        Calculates the dot product between each document's score for each
        feature and the respective feature weights."""
        results = []
        for doc_id, score in doc_ids_to_scores.iteritems():
            doc_score_vector = [score.get(key, 0) for key in
                                self.features_vector_key]

            doc_score = utils.dot_product(
                doc_score_vector, self.features_weights)

            results.append((doc_score, doc_score_vector, doc_id))
        return results
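A tiny usage sketch with made-up feature keys and weights (`ranker` stands in for whatever object owns this method):

# hypothetical setup: two features, weighted 0.7 / 0.3
ranker.features_vector_key = ['tf_idf', 'pagerank']
ranker.features_weights = [0.7, 0.3]
doc_ids_to_scores = {
    'doc1': {'tf_idf': 0.9, 'pagerank': 0.2},
    'doc2': {'tf_idf': 0.4},  # missing features default to 0
}
for doc_score, doc_score_vector, doc_id in ranker.calculate_score(doc_ids_to_scores):
    print(doc_id, doc_score)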
Example #12
    def predict(example):
        # input nodes
        i_nodes = learned_net[0]

        # activate input layer
        for v, n in zip(example, i_nodes):
            n.value = v

        # forward pass
        for layer in learned_net[1:]:
            for node in layer:
                inc = [n.value for n in node.inputs]
                in_val = dot_product(inc, node.weights)
                node.value = node.activation(in_val)

        # hypothesis
        o_nodes = learned_net[-1]
        prediction = find_max_node(o_nodes)
        return prediction
Example #13
    def score_one(self, rest, avg_number_of_ratings):
        """
        Calculates the overall score for a single restaurant, out of 10
        :param rest: Obj representing the restaurant to calculate a score for
        :param avg_number_of_ratings: The average number of ratings received by restaurants in this
            batch
        :return: The overall score of the restaurant, out of 10
        """
        scores = [
            self.calc_location_score(rest),
            self.calc_rating_score(rest, avg_number_of_ratings),
            self.calc_price_score(rest)
        ]
        print(
            f"Restaurant: {rest.name}\nRatings (score, num): {[rest.rating, rest.num_ratings]}\nScores (loc, rating, price): {scores}\n\n"
        )
        final_score = dot_product(self.weights, scores) * self.MAX_SCORE / sum(
            self.weights)
        return final_score
Example #14
def build_actor_actor_matrix(actors_tag_vector):
    """
    Build actor similarity matrix based on the actors_tag_vector.
    :param actors_tag_vector: actors in tags vector space
    :return: actor_matrix, 2D matrix describes the actor similarity
             actors, sorted_actor_list, you can get (index in matrix)->(actor id)
             actors_index, actor_index_dict, you can get (actor id)->(index in matrix)
    """
    actors = sorted(actors_tag_vector.keys())
    actors_index = {}

    for i, v in enumerate(actors):
        actors_index[v] = i

    actor_matrix = zeros((len(actors), len(actors)))

    for i, _actor in enumerate(actors):
        for j, actor in enumerate(actors):
            actor_matrix[i][j] = dot_product(actors_tag_vector[_actor],
                                             actors_tag_vector[actor])[0]

    return actor_matrix, actors, actors_index
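Note the trailing `[0]`: `dot_product` in this codebase evidently returns a pair with the raw dot product first and (per Example #17) the cosine second. A sketch of such a helper, assuming the tag vectors are plain sequences of floats:

import math

def dot_product(v1, v2):
    # returns (raw dot product, cosine similarity)
    dot = sum(a * b for a, b in zip(v1, v2))
    cos = dot / (math.sqrt(sum(a * a for a in v1)) *
                 math.sqrt(sum(b * b for b in v2)))
    return dot, cos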
Example #15
def build_movie_movie_matrix(movies_tag_vector):
    """
    Build movie similarity matrix based on the movies_tag_vector.
    :param movies_tag_vector: movies in tags vector space
    :return: movie_matrix, 2D matrix describes the movie similarity
             movies, sorted_movie_list, you can get (index in matrix)->(movie id)
             movies_index, movie_index_dict, you can get (movie id)->(index in matrix)
    """

    movies = sorted(movies_tag_vector.keys())
    movies_index = {}

    for i, v in enumerate(movies):
        movies_index[v] = i

    movie_matrix = zeros((len(movies), len(movies)))
    for i, _movie in enumerate(movies):
        for j, movie in enumerate(movies):
            movie_matrix[i][j] = dot_product(movies_tag_vector[_movie],
                                             movies_tag_vector[movie])[0]

    return movie_matrix, movies, movies_index
Example #16
    def along_axis_error(self, location, goal):
        """
        calc error along the axis defined by the goal position and direction

        input: two nav_msgs.msg.Odometry, current best location estimate + goal
        output: double distance along the axis

        axis is defined by a vector from the unit circle aligned with the goal
         heading
        relative position is the vector from the goal x, y to the location x, y

        distance is defined by the dot product

        example use:
        see calc_errors above
        """
        relative_position_x = (goal.pose.pose.position.x -
            location.pose.pose.position.x)
        relative_position_y = (goal.pose.pose.position.y -
            location.pose.pose.position.y)

        # relative position of the best estimate position and the goal
        # vector points from the goal to the location
        relative_position = (relative_position_x, relative_position_y, 0.0)

        goal_heading = quaternion_to_heading(goal.pose.pose.orientation)
        goal_vector_x = math.cos(goal_heading)
        goal_vector_y = math.sin(goal_heading)

        # vector in the direction of the goal heading, axis of desired motion
        goal_vector = (goal_vector_x, goal_vector_y, 0.0)

        val = dot_product(relative_position, goal_vector)

        if abs(val) < .0001:
            return 0.0

        return -val
Example #17
def find_related_actors_of_a_movie(movie_id, movie_actor_list,
                                   movie_tfidf_tags, movie_tags,
                                   actor_tfidf_tags, actor_tags, mode):
    """
    Task 1d, given a movie, finding the top10 most related actors who have not acted in the movie, leveraging the given
    movie's TF-IDF tag vectors, top 5 latent semantics in the space of tags.
    :param movie_id: Movie id given by user
    :param movie_actor_list: A list contains the actors acted in this movie
    :param movie_tfidf_tags: Movie mapped into tf-idf tags vector
    :param movie_tags: Movie mapped into raw tags vector
    :param actor_tfidf_tags: Actors mapped into tf-idf tags vector
    :param actor_tags: Actors mapped into raw tags vector
    :return:
    """
    if movie_id not in movie_tags:
        print('invalid movieid')
        return

    if mode == 'TF-IDF':

        result = {}
        for actor in actor_tfidf_tags:
            if actor in movie_actor_list[movie_id]:
                continue

            dot_product, cosine = lib.dot_product(actor_tfidf_tags[actor],
                                                  movie_tfidf_tags[movie_id])
            # print(math.acos(cosine)/math.pi * 180)
            result[actor] = dot_product

        for r in sorted(result.items(), key=lambda x: x[1], reverse=True)[:10]:
            print('actor_id: ', r[0], 'dot_product: ', r[1])
        # sort cosine output top-10
    elif mode == 'SVD':
        # construct a matrix from movie_id, actors tags
        a, row_name, column_name = dict_to_matrix(movie_tfidf_tags[movie_id],
                                                  actor_tfidf_tags)

        # SVD
        u, s, v = np.linalg.svd(a, full_matrices=False)

        result = {}
        # select top-5 latent semantic indices
        # print(np.allclose(a[0], np.dot(u[0], np.dot(np.diag(s), v))))

        for i, actor in enumerate(row_name):
            if actor in movie_actor_list[movie_id]:
                continue
            result[actor] = np.dot(u[0][:5], u[i + 1][:5])

        for r in sorted(result.items(), key=lambda x: x[1], reverse=True)[:10]:
            print('actor_id: ', r[0], 'dot_product: ', r[1])
    elif mode == 'PCA':
        # construct a matrix from movie_id, actors tags
        a, row_name, column_name = dict_to_matrix(movie_tfidf_tags[movie_id],
                                                  actor_tfidf_tags)

        # PCA
        pca = PCA()
        u = pca.fit_transform(a)

        result = {}
        # select top-5 latent semantic indices

        for i, actor in enumerate(row_name):
            if actor in movie_actor_list[movie_id]:
                continue
            result[actor] = np.dot(u[0][:5], u[i + 1][:5])

        for r in sorted(result.items(), key=lambda x: x[1], reverse=True)[:10]:
            print('actor_id: ', r[0], 'dot_product: ', r[1])
    elif mode == 'LDA':
        movie = movie_tags[movie_id]
        for actor in actor_tags:
            for tag in actor_tags[actor]:
                if tag not in movie:
                    movie[tag] = 0

        model, topic_terms = generate_model(movie)
        latent_semantics = get_top_10_using_LDA_model(
            movie_actor_list[movie_id], model, topic_terms, actor_tags)
        for actor in sorted(latent_semantics.items(),
                            key=lambda x: x[1],
                            reverse=True)[:10]:
            print('actor_id: ', actor[0], ' dot_product: ', actor[1])
    else:
        print('unsupported mode')
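In the SVD branch, `np.dot(u[0][:5], u[i + 1][:5])` compares the movie (row 0) to each actor row inside a 5-dimensional latent space. A self-contained sketch of that idea with made-up data:

import numpy as np

# hypothetical matrix: row 0 is the movie, the rest are actors, columns are tags
a = np.random.rand(4, 6)
u, s, v = np.linalg.svd(a, full_matrices=False)

k = min(5, u.shape[1])  # keep at most 5 latent semantics
sims = {i: np.dot(u[0][:k], u[i][:k]) for i in range(1, u.shape[0])}
for row, sim in sorted(sims.items(), key=lambda x: x[1], reverse=True):
    print('row:', row, 'similarity:', sim)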
Example #18
def BackPropagationLearner(dataset,
                           net,
                           learning_rate,
                           epochs,
                           activation=sigmoid):
    """
    [Figure 18.23]
    The back-propagation algorithm for multilayer networks.
    """
    # initialise weights
    for layer in net:
        for node in layer:
            node.weights = random_weights(min_value=-0.5,
                                          max_value=0.5,
                                          num_weights=len(node.weights))

    examples = dataset.examples
    # As of now dataset.target gives an int instead of a list;
    # changing the dataset class would affect all the learners,
    # so it will be taken care of later.
    o_nodes = net[-1]
    i_nodes = net[0]
    o_units = len(o_nodes)
    idx_t = dataset.target
    idx_i = dataset.inputs
    n_layers = len(net)

    inputs, targets = init_examples(examples, idx_i, idx_t, o_units)

    for epoch in range(epochs):
        # iterate over each example
        for e in range(len(examples)):
            i_val = inputs[e]
            t_val = targets[e]

            # activate input layer
            for v, n in zip(i_val, i_nodes):
                n.value = v

            # forward pass
            for layer in net[1:]:
                for node in layer:
                    inc = [n.value for n in node.inputs]
                    in_val = dot_product(inc, node.weights)
                    node.value = node.activation(in_val)

            # initialize delta
            delta = [[] for _ in range(n_layers)]

            # compute outer layer delta

            # error for the MSE cost function
            err = [t_val[i] - o_nodes[i].value for i in range(o_units)]

            # calculate delta at output, dispatching on the activation function
            # (the backward pass below uses the same `activation` argument)
            if activation == sigmoid:
                delta[-1] = [
                    sigmoid_derivative(o_nodes[i].value) * err[i]
                    for i in range(o_units)
                ]
            elif activation == relu:
                delta[-1] = [
                    relu_derivative(o_nodes[i].value) * err[i]
                    for i in range(o_units)
                ]
            elif activation == tanh:
                delta[-1] = [
                    tanh_derivative(o_nodes[i].value) * err[i]
                    for i in range(o_units)
                ]
            elif activation == elu:
                delta[-1] = [
                    elu_derivative(o_nodes[i].value) * err[i]
                    for i in range(o_units)
                ]
            else:
                delta[-1] = [
                    leaky_relu_derivative(o_nodes[i].value) * err[i]
                    for i in range(o_units)
                ]

            # backward pass
            h_layers = n_layers - 2
            for i in range(h_layers, 0, -1):
                layer = net[i]
                h_units = len(layer)
                nx_layer = net[i + 1]

                # weights from each ith layer node to each i + 1th layer node
                w = [[node.weights[k] for node in nx_layer]
                     for k in range(h_units)]

                if activation == sigmoid:
                    delta[i] = [
                        sigmoid_derivative(layer[j].value) *
                        dot_product(w[j], delta[i + 1]) for j in range(h_units)
                    ]
                elif activation == relu:
                    delta[i] = [
                        relu_derivative(layer[j].value) *
                        dot_product(w[j], delta[i + 1]) for j in range(h_units)
                    ]
                elif activation == tanh:
                    delta[i] = [
                        tanh_derivative(layer[j].value) *
                        dot_product(w[j], delta[i + 1]) for j in range(h_units)
                    ]
                elif activation == elu:
                    delta[i] = [
                        elu_derivative(layer[j].value) *
                        dot_product(w[j], delta[i + 1]) for j in range(h_units)
                    ]
                else:
                    delta[i] = [
                        leaky_relu_derivative(layer[j].value) *
                        dot_product(w[j], delta[i + 1]) for j in range(h_units)
                    ]

            # update weights
            for i in range(1, n_layers):
                layer = net[i]
                inc = [node.value for node in net[i - 1]]
                units = len(layer)
                for j in range(units):
                    layer[j].weights = vector_add(
                        layer[j].weights,
                        scalar_vector_product(learning_rate * delta[i][j],
                                              inc))

    return net
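The five-way if/elif chains above could collapse into one lookup table; a hedged refactor sketch, reusing the derivative helpers the function already calls:

# maps each activation to its derivative (applied to the activated value)
DERIVATIVES = {
    sigmoid: sigmoid_derivative,
    relu: relu_derivative,
    tanh: tanh_derivative,
    elu: elu_derivative,
}

def activation_derivative(activation, value):
    # default to leaky ReLU, matching the else branches above
    return DERIVATIVES.get(activation, leaky_relu_derivative)(value)

# the output-layer delta then becomes:
# delta[-1] = [activation_derivative(activation, o_nodes[i].value) * err[i]
#              for i in range(o_units)]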
Example #19
    def predict(example):
        x = [1] + example
        return sigmoid(dot_product(w, x))
Example #20
    def predict(example):
        x = [1] + example
        return dot_product(w, x)
Example #21
def dot_and_cosine(v1, v2):
    # this codebase's utils.dot_product returns a (dot, cosine) pair;
    # index 0 keeps just the dot product
    return utils.dot_product(v1, v2)[0]