Example #1
import numpy as np

def smooth(values, dim, ann):
    # replace each value with the mean of its neighbours
    new_values = np.empty(len(values))
    for i in range(len(values)):
        neighbours = list(get_neighbours(i, dim, ann))
        new_values[i] = sum(values[n] for n in neighbours) / len(neighbours)
    return new_values
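
The snippet assumes a project-level get_neighbours(i, dim, ann). Purely as an illustrative sketch (not the project's code), one plausible reading treats dim as the side length of a row-major square grid and ann as the neighbourhood radius:

def get_neighbours(i, dim, ann):
    # hypothetical: yield flat indices of all cells within `ann` steps of cell i
    row, col = divmod(i, dim)
    for dr in range(-ann, ann + 1):
        for dc in range(-ann, ann + 1):
            if dr == 0 and dc == 0:
                continue
            r, c = row + dr, col + dc
            if 0 <= r < dim and 0 <= c < dim:
                yield r * dim + c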
Example #2
import random

def hillClimb(search_space):
    # start from a random solution
    x = random.choice(search_space)
    local_optimum = x
    while True:
        # explore around the current optimum, not the fixed starting point
        neighbours = utils.get_neighbours(local_optimum)
        # reset the flag once per pass, not once per neighbour, so an early
        # improvement is not erased by a later non-improving neighbour
        better_optimum = False
        for neighbour in neighbours:
            if eval(neighbour) > eval(local_optimum):
                local_optimum = neighbour
                print(local_optimum)
                better_optimum = True
        if not better_optimum:
            return local_optimum
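
The snippet leans on a project utils module and an eval scoring function. Toy stand-ins (all names hypothetical) that make it runnable, maximising f(x) = -(x - 7)**2 over the integers 0..20:

search_space = list(range(21))

def eval(x):  # shadows the builtin, matching the snippet's naming
    return -(x - 7) ** 2

class utils:  # stand-in for the project's utils module
    @staticmethod
    def get_neighbours(x):
        return [n for n in (x - 1, x + 1) if 0 <= n <= 20]

print(hillClimb(search_space))  # climbs step by step to 7, the global maximum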
Example #3
    def __init__(self, ID, network):
        self.id = ID
        self.N = network.N
        self.coords = id_to_coords(self.id, self.N)
        self.threshold = network.threshold
        self.activity = 0
        self.spiking = False
        self.network = network
        # current info, sent to neighbours while diffusing
        self.info = np.zeros((self.N, self.N))
        # updated info, use separate board to avoid diffusing information we just got
        self.new_info = np.zeros_like(self.info)

        self.neighbourhood = get_neighbours(*self.coords, self.N)
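
id_to_coords and get_neighbours come from elsewhere in that project; a plausible sketch (hypothetical, assuming a row-major N x N grid with 4-connected cells):

def id_to_coords(ID, N):
    # flat id -> (row, col) on a row-major N x N grid
    return divmod(ID, N)

def get_neighbours(row, col, N):
    # coordinates of the 4-connected neighbours that fall inside the grid
    deltas = ((-1, 0), (1, 0), (0, -1), (0, 1))
    return [(row + dr, col + dc) for dr, dc in deltas
            if 0 <= row + dr < N and 0 <= col + dc < N]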
Example #4
File: main.py Project: in7erval/n-puzzle
def a_star_search(start, goal):
    frontier = PriorityQueue()
    frontier.put(start, 0)
    came_from = dict()
    cost_so_far = dict()
    came_from[start] = None
    cost_so_far[start] = 0

    while not frontier.empty():
        current = frontier.get()
        if current == goal:
            break
        for n in get_neighbours(current):
            new_cost = cost_so_far[current] + 1
            if n not in cost_so_far or new_cost < cost_so_far[n]:
                cost_so_far[n] = new_cost
                # note: with the heuristic alone as priority this behaves as
                # greedy best-first search; true A* would use
                # priority = new_cost + heuristic2(goal, n)
                priority = heuristic2(goal, n)
                frontier.put(n, priority)
                came_from[n] = current
    return came_from, cost_so_far
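
frontier.put(item, priority) does not match the standard library's queue.PriorityQueue, whose put takes a single item, so the project presumably wraps heapq. A minimal sketch of such a wrapper (hypothetical, not the project's actual class):

import heapq

class PriorityQueue:
    def __init__(self):
        self._heap = []
        self._count = 0  # tie-breaker so unorderable items are never compared

    def empty(self):
        return not self._heap

    def put(self, item, priority):
        heapq.heappush(self._heap, (priority, self._count, item))
        self._count += 1

    def get(self):
        return heapq.heappop(self._heap)[-1]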
Example #5
import os

import gensim

def get_embeddings(input_file,
                   output_folder,
                   directed=False,
                   walks_per_node=10,
                   steps=80,
                   size=300,
                   window=10,
                   workers=1,
                   verbose=True):
    """
    Performs uniform random walks on given graph and generates its embeddings.

    :param input_file: Path to a file containing an edge list of a graph (str). 
    :param output_folder: Directory where the embeddings will be stored (str).
    :param directed: True if the graph is directed (bool).
    :param walks_per_node: How many random walks will be performed from each node (int).
    :param steps: How many node traversals will be performed for each random walk (int).
    :param size: Base dimensionality of the embedding vector. Should be divisible by 6 (int).
    :param window: The window parameter for the word2vec model (i.e. the maximum distance in a random walk at which one node can be considered another node's context) (int).
    :param workers: Number of threads to use when training the word2vec model (int).
    :param verbose: Whether to print progress messages to stdout (bool).
    """

    if verbose:
        print("Getting the graph")
    graph = get_graph(input_file, directed)

    if verbose:
        print("Getting the neighbours' dictionary")
    neighbours = get_neighbours(graph)

    if verbose:
        print("Getting the random walks")
    random_walks = get_random_walks(neighbours, walks_per_node, steps)

    if verbose:
        print("Getting the embeddings")
    # train a skip-gram word2vec model at the full, half and third dimensionality
    # (gensim < 4.0 API: `size` and `iter` became `vector_size` and `epochs` in 4.0)
    for dim in (size, size // 2, size // 3):
        if verbose:
            print(dim)
        model = gensim.models.Word2Vec(random_walks,
                                       min_count=0,
                                       size=dim,
                                       window=window,
                                       iter=1,
                                       sg=1,
                                       workers=workers)
        model.wv.save_word2vec_format(
            os.path.join(output_folder, 'embeddings_' + str(dim) + '.csv'))
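
A hypothetical invocation (paths are placeholders, not from the original project); with size=300 this writes embeddings_300.csv, embeddings_150.csv and embeddings_100.csv into the output folder:

get_embeddings('graph.edgelist', 'embeddings/',
               directed=False, walks_per_node=10, steps=80,
               size=300, window=10, workers=4, verbose=True)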
Example #6
def get_embeddings(input_file, output_folder, directed=False, walks_per_node=10, steps=80,
                   size=300, window=10, workers=1, metric='jaccard', verbose=True):
    """
    Performs non-uniform random walks (on neighboring nodes) on given graph and generates its embeddings.

    :param input_file: Path to a file containing an edge list of a graph (str). 
    :param output_folder: Directory where the embeddings will be stored (str).
    :param directed: True if the graph is directed (bool).
    :param walks_per_node: How many random walks will be performed from each node (int).
    :param steps: How many node traversals will be performed for each random walk (int).
    :param size: Dimensionality of the embedding vector. Should be divisible by 6 (int).
    :param window: The window parameter for the word2vec model (i.e. the maximum distance in a random walk at which one node can be considered another node's context) (int).
    :param workers: Number of threads to use when training the word2vec model (int).
    :param metric: The metric which will be used to generate similarities (str).
    :param verbose: Whether to print progress messages to stdout (bool).
    """

    if verbose:
        print("Getting the graph")
    graph = get_graph(input_file, directed)
        
    if verbose:
        print("Getting the neighbours' dictionary")
    neighbours = get_neighbours(graph)
    
    if verbose:
        print("Getting the similarities")

    if metric == "common_neighbours":
        similarities = get_similarities_common_neighbours(neighbours)
    elif metric == 'jaccard':
        similarities = get_similarities_jaccard(neighbours)
    elif metric == 'euclidean':
        adjacency_dictionary = get_adjacency(graph)
        similarities = get_similarities_euclidean(neighbours, adjacency_dictionary)
    elif metric == 'cosine':
        adjacency_dictionary = get_adjacency(graph)
        similarities = get_similarities_cosine(neighbours, adjacency_dictionary)
    elif metric == 'pearson':
        adjacency_dictionary = get_adjacency(graph)
        similarities = get_similarities_pearson(neighbours, adjacency_dictionary)
    else:
        raise ValueError("Invalid value for parameter 'metric'.\n" + \
                         "Should be one of: 'common_neighbours', 'jaccard', 'euclidean', 'cosine', 'pearson'")
    if verbose:
        print("Getting the random walks")
    random_walks = get_random_walks(neighbours, similarities, walks_per_node, steps)

    if verbose:
        print("Getting the embeddings")
    # train a skip-gram word2vec model at the full, half and third dimensionality
    for dim in (size, size // 2, size // 3):
        if verbose:
            print(dim)
        model = gensim.models.Word2Vec(random_walks, min_count=0, size=dim,
                                       window=window, iter=1, sg=1, workers=workers)
        model.wv.save_word2vec_format(
            os.path.join(output_folder, 'embeddings_' + str(dim) + '.csv'))
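
A hypothetical invocation (placeholder paths); 'jaccard' biases each step toward neighbours whose neighbourhoods overlap most with the current node's:

get_embeddings('graph.edgelist', 'embeddings/', metric='jaccard',
               walks_per_node=10, steps=80, size=300, workers=4)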
Example #7
    def setup(self):
        self.training_bool = tf.placeholder(tf.bool, name='training_bool')
        self.images = tf.placeholder(
            tf.float32, [None] + self.image_shape, name='real_images')

        # averaging over reshape axes 2 and 4 collapses each lowres x lowres
        # pixel block, downsampling the images to their low-resolution versions
        self.lowres_images = tf.reduce_mean(tf.reshape(self.images,
            [self.batch_size, self.lowres_size, self.lowres,
             self.lowres_size, self.lowres, self.c_dim]), [2, 4])

        # latent vector z that the generator G(z) samples from
        self.z = tf.placeholder(tf.float32, [None, self.z_dim], name='z')
        self.z_sum = tf.summary.histogram("z", self.z)

        # initialize the generator output and its low-res version for the contextual loss
        self.G = self.generator(self.z)
        self.lowres_G = tf.reduce_mean(tf.reshape(self.G,
            [self.batch_size, self.lowres_size, self.lowres,
             self.lowres_size, self.lowres, self.c_dim]), [2, 4])

        # apply the discriminator twice (weights shared via reuse=True): once to
        # real training images, once to images produced by the generator
        self.D, self.D_logits = self.discriminator(self.images)
        self.D_, self.D_logits_ = self.discriminator(self.G, reuse=True)

        self.d_sum = tf.summary.histogram("d", self.D)
        self.d__sum = tf.summary.histogram("d_", self.D_)
        self.G_sum = tf.summary.image("G", self.G)

        # discriminator loss on training data: cross-entropy between the
        # discriminator predictions and all ones (real labels)
        self.d_loss_real = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits,
                                                    labels=tf.ones_like(self.D)))

        # discriminator loss on generator outputs: cross-entropy between the
        # predictions and all zeros (fake labels)
        self.d_loss_fake = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits_,
                                                    labels=tf.zeros_like(self.D_)))

        # generator loss: cross-entropy between the discriminator predictions on
        # G(z) and all ones (the generator wants its outputs labelled real)
        self.g_loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits_,
                                                    labels=tf.ones_like(self.D_)))

        self.d_loss_real_sum = tf.summary.scalar("d_loss_real", self.d_loss_real)
        self.d_loss_fake_sum = tf.summary.scalar("d_loss_fake", self.d_loss_fake)

        #sum discriminator losses
        self.d_loss = self.d_loss_real + self.d_loss_fake

        self.g_loss_sum = tf.summary.scalar("g_loss", self.g_loss)
        self.d_loss_sum = tf.summary.scalar("d_loss", self.d_loss)

        t_vars = tf.trainable_variables()

        self.d_vars = [var for var in t_vars if 'd_' in var.name]
        self.g_vars = [var for var in t_vars if 'g_' in var.name]

        self.saver = tf.train.Saver(max_to_keep=1)

        # variables for the image-completion stage

        # binary mask in the shape of the image: 1 on known pixels, 0 on the region to complete
        self.mask = tf.placeholder(tf.float32, self.image_shape, name='mask')
        self.lowres_mask = tf.placeholder(tf.float32, self.lowres_shape, name='lowres_mask')



        # NOTE: this weighting experiment is dead code. self.mask is a placeholder,
        # so its values are unknown at graph-construction time: in TF1
        # `tensor == [0, 0, 0]` falls back to identity comparison and is always
        # False, and assigning tensors into a NumPy array would fail. The weighted
        # contextual loss that would have consumed these weights is commented out
        # below, so the loop is disabled here as well.
        self.weighted_contextual_loss = np.full(self.image_shape, 0.5, dtype=np.float32)
        # for row_index in range(self.image_shape[0]):
        #     for col_index in range(self.image_shape[1]):
        #         if self.mask[row_index][col_index] == [0, 0, 0]:  # masked pixel
        #             self.weighted_contextual_loss[row_index][col_index] = [0.0, 0.0, 0.0]
        #         else:  # unmasked: weight grows with the number of nearby masked pixels
        #             weight = get_neighbours(row_index, col_index, self.mask)
        #             self.weighted_contextual_loss[row_index][col_index] += [weight, weight, weight]
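        # A graph-time alternative (an assumption, not the project's code) would
        # build the weights with TF ops so they depend on the mask fed at run
        # time, e.g. weighting each known pixel by the fraction of masked pixels
        # in its 3x3 neighbourhood:
        #   masked_frac = tf.nn.depthwise_conv2d(
        #       (1.0 - self.mask)[None, ...],
        #       tf.ones([3, 3, self.c_dim, 1]) / 9.0,
        #       strides=[1, 1, 1, 1], padding='SAME')
        #   weights = self.mask * (0.5 + masked_frac[0])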

        #define contextual loss as pixel difference between mask * generator output and mask * image to infill
        # added weighted_contextual_loss
        # self.contextual_loss = tf.reduce_sum(
        #     tf.contrib.layers.flatten(
        #         tf.abs(tf.multiply(self.weighted_contextual_loss, tf.multiply(self.mask, self.G) - tf.multiply(self.mask, self.images))), 1))


        # mi = tf.multiply(self.mask, self.images)
        # mG = tf.multiply(self.mask, self.G)
        # diff = mG - mi
        # wdiff = tf.multiply(self.weighted_contextual_loss, diff)
        # self.contextual_loss = tf.reduce_sum(tf.contrib.layers.flatten(tf.abs(wdiff)), 1)

        mi = tf.multiply(self.mask, self.images)
        mG = tf.multiply(self.mask, self.G)
        # mi = tf.multiply(self.weighted_contextual_loss, self.images)
        # mG = tf.multiply(self.weighted_contextual_loss, self.G)
        diff = mG - mi
        self.contextual_loss = tf.reduce_sum(tf.contrib.layers.flatten(tf.abs(diff)), 1)


        # as in other GAN inpainting implementations, add the same pixel difference
        # on the low-res versions to capture the bigger picture
        self.contextual_loss += tf.reduce_sum(
            tf.contrib.layers.flatten(
                tf.abs(tf.multiply(self.lowres_mask, self.lowres_G) - tf.multiply(self.lowres_mask, self.lowres_images))), 1)

        # improve loss function to help smooth the mask boundary
        # TODO only works for center mask

        l = int(self.image_size*self.center_scale)
        u = int(self.image_size*(1.0-self.center_scale))

        # select from G(z) the ring of pixels just inside the mask boundary
        generated_inner_borderline = np.zeros(self.image_shape).astype(np.float32)

        generated_inner_borderline[l, l:u, :] = 1.0
        generated_inner_borderline[l:u, u-1, :] = 1.0
        generated_inner_borderline[u-1, l:u, :] = 1.0
        generated_inner_borderline[l:u, l, :] = 1.0

        # a second ring one pixel further inside the mask
        second_inner_borderline = np.zeros(self.image_shape).astype(np.float32)
        second_inner_borderline[l+1, l:u, :] = 1.0
        second_inner_borderline[l:u, u-2, :] = 1.0
        second_inner_borderline[u-2, l:u, :] = 1.0
        second_inner_borderline[l:u, l+1, :] = 1.0

        # the ring of known image pixels just outside the mask
        masked_image_outerborder = np.zeros(self.image_shape).astype(np.float32)
        masked_image_outerborder[l-1, l:u, :] = 1.0
        masked_image_outerborder[l:u, u, :] = 1.0
        masked_image_outerborder[u, l:u, :] = 1.0
        masked_image_outerborder[l:u, l-1, :] = 1.0


        self.blending_loss = tf.reduce_sum(
            tf.contrib.layers.flatten(
                tf.abs(tf.multiply(generated_inner_borderline, self.G) - tf.multiply(masked_image_outerborder, self.images))), 1)

        self.blending_loss += tf.reduce_sum(
            tf.contrib.layers.flatten(
                tf.abs(tf.multiply(second_inner_borderline, self.G) - tf.multiply(masked_image_outerborder, self.images))), 1)

        # to avoid choosing a z whose G(z) simply looks unrealistic, include a
        # perceptual loss (identical to the generator loss); it ensures the chosen
        # G(z) still fools the discriminator
        self.perceptual_loss = self.g_loss
        self.complete_loss = self.contextual_loss + self.lamda*self.perceptual_loss + self.blending_loss
        # minimize L = contextual + lamda * perceptual + blending with respect to z via gradient descent
        self.grad_complete_loss = tf.gradients(self.complete_loss, self.z)
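
The gradient above is typically consumed by an outer optimisation over z at completion time. A minimal sketch of such a loop, assuming a trained instance model of this class and an open TF1 sess; the shapes, learning rate, and step count are placeholders, not values from the project:

import numpy as np

# hypothetical completion loop: plain gradient descent on z, projected back
# into [-1, 1] after each step; model, sess, mask, lowres_mask and batch_images
# are assumed to already exist
zhats = np.random.uniform(-1, 1, size=(64, 100)).astype(np.float32)
for step in range(1000):
    feed = {model.z: zhats, model.mask: mask, model.lowres_mask: lowres_mask,
            model.images: batch_images, model.training_bool: False}
    loss, grads = sess.run([model.complete_loss, model.grad_complete_loss], feed)
    zhats = np.clip(zhats - 0.01 * grads[0], -1, 1)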