Example #1
def diffusion_payoff(s1, s2):
    # Monte Carlo estimate of player "1"'s payoff: run the competitive
    # diffusion 5000 times, each time on a fresh copy of the graph, and
    # record the difference in the sizes of the two players' node sets.
    # (data_set, tools, and np are module-level names.)
    iterations = 5000
    r = np.zeros(iterations)
    for i in range(iterations):
        g = tools.load_graph(data_set)                # reload to reset node states
        tools.diffuse(g, s1, s2)                      # competitive diffusion from both seed sets
        r[i] = len(g.graph["1"]) - len(g.graph["2"])  # player "1"'s nodes minus player "2"'s
    return r
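A minimal usage sketch (not from the source), assuming it sits in the same module as the function above so the module-level names it reads are in scope; the node IDs are illustrative and the data set name reuses the one from Example #4.
# Hypothetical usage sketch, placed in the same module as diffusion_payoff;
# node IDs and the data set name are illustrative.
data_set = "wiki_vote"             # module-level global read by diffusion_payoff
s1, s2 = {0, 1, 2}, {3, 4, 5}      # illustrative seed sets for players "1" and "2"
payoffs = diffusion_payoff(s1, s2)
print("mean payoff for player 1:", payoffs.mean())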
Example #2
def competition(data_set, nodes):
    # Sample an action for each player, resolve the actions into seed sets,
    # run the competitive diffusion on a fresh copy of the graph, and record
    # player "1"'s margin; repeated 1000 times.
    # (k, ca, and get_seeds are module-level names.)
    iterations = 1000
    r = np.zeros(iterations)
    for i in range(iterations):
        a1 = ca.action_v_each(k, 2, nodes)
        a2 = ca.action_v_each(k, 1, nodes)
        s1, s2 = get_seeds(a1, a2)                    # turn the two actions into seed sets
        g = tools.load_graph(data_set)
        tools.diffuse(g, s1, s2)
        r[i] = len(g.graph["1"]) - len(g.graph["2"])  # player "1"'s nodes minus player "2"'s
    return r
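A hedged usage sketch for this variant, again assuming it lives in the same module so that k, ca, and get_seeds are in scope; the data set name reuses Example #4's, and list(g.nodes()) assumes tools.load_graph returns a NetworkX graph.
# Hypothetical usage sketch - assumes k, ca, and get_seeds are defined in this
# module and that tools.load_graph returns a NetworkX graph.
g = tools.load_graph("wiki_vote")
nodes = list(g.nodes())
payoffs = competition("wiki_vote", nodes)
print("player 1 wins by", payoffs.mean(), "nodes on average")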
Example #3
def competition(a1, a2):
    # Single-run variant: resolve the two actions into seed sets, run one
    # diffusion on a fresh copy of the graph, and return player "1"'s margin.
    # (data_set, tools, and get_seeds are module-level names.)
    s1, s2 = get_seeds(a1, a2)
    g = tools.load_graph(data_set)
    tools.diffuse(g, s1, s2)
    return len(g.graph["1"]) - len(g.graph["2"])
Example #4
import numpy as np
import networkx as nx
import tools
import TIM
import time

start_time = time.time()

k = 50                                   # target seed-set size for TIM
data_set = "wiki_vote"
result_folder = "../results/graph/" + data_set + "/"

# theta: how many random reverse-reachable (RR) sets TIM samples for this k
g = tools.load_graph(data_set)
theta = TIM.calculate_theta(g, k)

# build the RR-set cover graph on a fresh copy of the graph
g = tools.load_graph(data_set)
cover_graph = TIM.create_rr_sets_graph(g, theta)

end_time = time.time()

# persist the cover graph (pickle) and a short summary of the run
nx.write_gpickle(cover_graph, result_folder + "rr_sets_graph")

file = open(result_folder + "result.txt", "w")
file.write("data set: " + data_set + "\n")
file.write("k: " + str(k) + "\n")
file.write("theta: " + str(theta) + "\n")
file.write("run time: " + str(end_time - start_time) + "\n")
file.close()
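To reuse the persisted cover graph in a later run, it can be read back with NetworkX's pickle reader; a sketch mirroring the path written above (read_gpickle, like the write_gpickle call used here, is available in NetworkX versions before 3.0).
# Sketch: reload the RR-set cover graph written by the script above.
import networkx as nx

cover_graph = nx.read_gpickle("../results/graph/wiki_vote/rr_sets_graph")
print(cover_graph.number_of_nodes(), cover_graph.number_of_edges())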
Example #5
def label_images(snapcat_json):

    model_file = settings.graph['graph']
    label_file = settings.graph['labels']
    input_height = settings.graph['input_height']
    input_width = settings.graph['input_width']
    input_mean = settings.graph['input_mean']
    input_std = settings.graph['input_std']
    input_layer = settings.graph['input_layer']
    output_layer = settings.graph['output_layer']

    graph = tools.load_graph(model_file)
    labels = tools.load_labels(label_file)

    # Create Full path
    # todo, should this be outside the loop, one TF session and then parse all the images?
    with tf.Session(graph=graph) as sess:

        for image in snapcat_json.json_data:

            file = snapcat_json.json_data[image]["path"]

            if not os.path.isfile(file):
                print("***************ERROR - File doesn't exist:", file)
                continue

            img = cv2.imread(file)
            # crop to the stored area of interest for classification
            # TODO - modify for multiple areas of interest
            try:
                area_of_interest = snapcat_json.json_data[image]["area_of_interest"]
                x1, x2 = area_of_interest[0], area_of_interest[1]
                y1, y2 = area_of_interest[2], area_of_interest[3]
                img = img[y1:y2, x1:x2, :]
            except (KeyError, IndexError):
                # no area of interest recorded; classify the full image
                pass

            resized_image = cv2.resize(img, (input_width, input_height))
            t = (np.float32(resized_image) - input_mean) / input_std

            input_name = "import/" + input_layer
            output_name = "import/" + output_layer
            input_operation = graph.get_operation_by_name(input_name)
            output_operation = graph.get_operation_by_name(output_name)

            # todo, suspect this is what's printing a lot of messages. Attempt to consolidate calls to this function
            results = sess.run(output_operation.outputs[0],
                               {input_operation.outputs[0]: [t]})

            results = np.squeeze(results)

            # get classification: only the single highest-scoring label is used
            top_k = results.argsort()[-5:][::-1]
            for i in top_k:

                # if the confidence is below the configured threshold, label the image "unsure"
                print("%s: %f" % (labels[i], results[i]))
                # TODO - classifier_label will be associated with an area of interest
                if results[i] >= settings.sort_image['cat_confidence_threshold'] and labels[i] == 'cats':
                    print("cat")
                    snapcat_json.update(image, "classifier_label", "cat")
                elif results[i] >= settings.sort_image['not_cat_confidence_threshold'] and labels[i] == 'not cats':
                    print("not cat")
                    snapcat_json.update(image, "classifier_label", "not_cat")
                else:
                    print("unsure")
                    snapcat_json.update(image, "classifier_label", "unsure")

                # only the top prediction is needed, so exit after the first iteration
                break

        snapcat_json.save()
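A call sketch for this function. The wrapper object and its constructor are hypothetical, inferred only from the attributes the function touches (json_data, update, save); tf, cv2, settings, tools, os, and np are assumed to be imported at module level in the original project.
# Hypothetical usage sketch - SnapcatJSON is an assumed wrapper exposing
# json_data (dict of image entries with a "path"), update(), and save().
snapcat_json = SnapcatJSON("snapcat.json")   # illustrative constructor
label_images(snapcat_json)                   # writes a classifier_label per image, then saves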
Example #6
def label_images(snapcat_json):

    model_file = settings.graph['graph']
    label_file = settings.graph['labels']
    input_height = settings.graph['input_height']
    input_width = settings.graph['input_width']
    input_mean = settings.graph['input_mean']
    input_std = settings.graph['input_std']
    input_layer = settings.graph['input_layer']
    output_layer = settings.graph['output_layer']

    graph = tools.load_graph(model_file)
    labels = tools.load_labels(label_file)

    # Create Full path
    # todo, should this be outside the loop, one TF session and then parse all the images?
    with tf.Session(graph=graph) as sess:

        for image in snapcat_json.json_data:

            file = snapcat_json.json_data[image]["path"]

            if not os.path.isfile(file):
                print("***************ERROR - File doesn't exist:", file)
                continue

            # crop to the stored area of interest for classification
            area_of_interest = snapcat_json.json_data[image]["area_of_interest"]
            x1, x2 = area_of_interest[0], area_of_interest[1]
            y1, y2 = area_of_interest[2], area_of_interest[3]

            img = cv2.imread(file)
            img = img[y1:y2, x1:x2, :]

            resized_image = cv2.resize(img, (224, 224))
            resized_image = img_as_ubyte(resized_image)

            # TODO - we need to load the area of interest and resize the image here rather than read the file:
            tempfile = "tempfile.jpg"
            cv2.imwrite(tempfile, resized_image)
            cv2.destroyAllWindows()

            # TODO - this loads a tensorflow session resulting in a bunch of debug output. Probably will be more efficient to leave the session open if possible
            t = tools.read_tensor_from_image_file(tempfile,
                                                  input_height=input_height,
                                                  input_width=input_width,
                                                  input_mean=input_mean,
                                                  input_std=input_std)

            input_name = "import/" + input_layer
            output_name = "import/" + output_layer
            input_operation = graph.get_operation_by_name(input_name)
            output_operation = graph.get_operation_by_name(output_name)

            # todo, suspect this is what's printing a lot of messages. Attempt to consolidate calls to this function
            results = sess.run(output_operation.outputs[0],
                               {input_operation.outputs[0]: t})

            results = np.squeeze(results)

            # get classification: only the single highest-scoring label is used
            top_k = results.argsort()[-5:][::-1]
            for i in top_k:

                # if the confidence is below the configured threshold, label the image "unsure"
                print("%s: %f" % (labels[i], results[i]))
                if results[i] >= settings.sort_image['cat_confidence_threshold'] and labels[i] == 'cats':
                    print("cat")
                    snapcat_json.update(image, "classifier_label", "cat")
                elif results[i] >= settings.sort_image['not_cat_confidence_threshold'] and labels[i] == 'not cats':
                    print("not cat")
                    snapcat_json.update(image, "classifier_label", "not_cat")
                else:
                    print("unsure")
                    snapcat_json.update(image, "classifier_label", "unsure")

                # only the top prediction is needed, so exit after the first iteration
                break

        snapcat_json.save()
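The temp-file round-trip flagged by the TODOs above can likely be avoided by normalizing the cropped image in memory, as Example #5 already does. A small helper sketching that substitution; the helper name is hypothetical, and the arithmetic mirrors Example #5.
# Sketch of in-memory preprocessing mirroring Example #5, avoiding the
# tempfile.jpg write/read round-trip; the helper name is hypothetical.
import cv2
import numpy as np

def preprocess_in_memory(img, input_width, input_height, input_mean, input_std):
    # resize the (already cropped) image and normalize it into a [1, H, W, 3] float32 batch
    resized_image = cv2.resize(img, (input_width, input_height))
    t = (np.float32(resized_image) - input_mean) / input_std
    return np.expand_dims(t, axis=0)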