def __init__(self, rgb_path, dem_path, clip_path, Settings, platform='linux'):
		"""
		Arguments
		---------
		rgb_path : str (path object)
			Path to input RGB tif.
		dem_path : str (path object)
			Path to input DEM tif.
		clip_path : str (path object)
			Path to clipped field shapefile.
		Settings : settings.DetectionSettings
			Object containing all parameters.
		platform : str, optional
			Either 'linux' or 'windows'; required because Keras models
			are loaded differently on Windows. Default is 'linux'.
		"""

		self.rgb_path = rgb_path
		self.dem_path = dem_path
		self.clip_path = clip_path
		self.Settings = Settings
		self.platform = platform

		self.network = load_network(self.Settings.model_path, platform)
		self.rgb_input_shape, self.dem_input_shape = get_input_sizes(self.network)
		self.num_classes = get_num_classes(self.network)

		self.Rasters = RasterCommunicator(self.rgb_path, self.dem_path, self.clip_path)

		self.detected_crops = {}
		self.bg_dict = {}
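For reference, a minimal usage sketch of the constructor documented above. The class name (CropDetector), the file paths, and the way DetectionSettings is built are placeholders inferred from the docstring, not confirmed by the source; only the argument names and the settings module come from the docstring itself.

from settings import DetectionSettings

# Hypothetical settings object; __init__ above only shows that model_path is read from it.
settings = DetectionSettings(model_path="models/crop_detector.h5")

detector = CropDetector(  # class name is a placeholder
    rgb_path="data/field_rgb.tif",
    dem_path="data/field_dem.tif",
    clip_path="data/field_boundary.shp",
    Settings=settings,
    platform="linux",
)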
Example #2
import sys

import network


def main():
    network_file = sys.argv[1]
    output_file = sys.argv[2]

    try:
        net, freq = network.load_network(network_file)
        print " Recompiling and saving prediction function."
        network.save_prediction_function(net, output_file, freq)
    except ValueError:
        print " Error loading network, trying prediction function instead."
Example #3
def main():
    network = load_network("resources/network.txt", NUMBER_SOURCES)
    load_songs("resources/songs_already_visited.txt", NUMBER_SOURCES, network)

    playlist = [
        "Jon Hopkins", "Radiohead", "Tame Impala", "Bonobo", "Coldplay",
        "Bon Iver", "Nirvana", "David Bowie", "The xx"
    ]

    suggestions = get_suggestions(required_suggestions=10,
                                  required_unique_bands=20,
                                  playlist=playlist,
                                  network=network)
    print([el[0].get_title() + " by " + el[1].get_title() for el in suggestions])
Example #4
import board
import network


def run_game():
    net = network.load_network("pop4_gen10.npz")
    turn = 0
    turn_count = 0
    brd = board.Board()
    while turn_count < 40:
        turn = board.next_move(turn)
        turn_count += 1
        if turn % 2 == 1:
            brd = player_turn(turn, brd)
        else:
            #            brd = minimax_turn(2, turn, brd)
            brd = player_two(turn, brd, net)
        if brd.longest_run_1 == 4 or brd.longest_run_2 == 4:
            print(game_over(brd))
            return game_over(brd)
    return game_over()
Example #5
import os

import numpy as np

import network
import preprocess


# Signature inferred from the call in show_network_results() below.
def analyze_results(images, predictiondistribution, targetlabels):
    results = {}
    results["prediction"] = np.argmax(predictiondistribution, axis=1)
    results["correctly_classified"] = np.nonzero(results["prediction"] == targetlabels)[0]
    results["incorrectly_classified"] = np.nonzero(results["prediction"] != targetlabels)[0]
    results["fraction_correct"] = results["correctly_classified"].size / targetlabels.shape[0]
    results["fraction_incorrect"] = 1 - results["fraction_correct"]

    return results

def show_network_results(network):
    """Helper function to quickly show the first 10 digits in the validation set for a network object"""
    images_val, labels_val = preprocess.load_mnist(dataset='testing')
    validationoutput = network.eval_fn(images_val)
    results = analyze_results(images_val, validationoutput, labels_val)
    ind = np.arange(10)
    print("Validation error: {}".format(results["fraction_incorrect"]))

    # postprocess.plot_multiple_digits(images_val[ind,:,:], results["prediction"][ind])
    plot_images_with_probabilities(images_val[ind,:,:], validationoutput[ind,:])

if __name__ == "__main__":
    # path = 'data'
    #
    # images, labels = preprocess.load_mnist(path=path)
    # # results = analyze_results(images, )
    # plot_multiple_digits(images[0:30,:,:], labels[0:30])

    nn = network.load_network(os.path.join("networks", "test"))
    show_network_results(nn)
Example #6
    tests.test_mutation_strength(number_of_tests=[0.5, 2.5, 5.0, 7.5, 13.5])
    tests.test_crossover(iterations=400,
                         population_size=20,
                         mutation_strength=2.5,
                         start_crossover_probability=0.1,
                         crossover_probability_step=0.3)
    tests.test_population_size(iterations=400,
                               start_population_size=20,
                               population_size_step=10,
                               mutation_strength=2.5,
                               crossover_probability=0.1)
    # Section responsible for single individual training
    from game import Game
    from network import Network, load_network
    from evolutionary_algorithm import initialize, evolve

    game = Game(seed=1)
    Network.set_seed(1)
    population = initialize(30, [5, 10, 1])
    results = []
    best_individual = evolve(game.play,
                             population,
                             3.5,
                             0.1,
                             400,
                             results,
                             verbose=True)
    best_individual.save_network('Net.json')
    best_individual = load_network('Net.json')
    game.play(brain=best_individual, graphical=True)
Example #7
def test():
    parameters_test = [[48], [0.0001], ["hard_sigmoid"], ["sigmoid"], [None],
                       [None]]
    network = load_network((6 * number_of_lag, 12), parameters_test)
    print(test_values(network, X_test, y_test))
Example #8
    def construct_model(self, inputs=None):
        # Base-learner
        self.net = net = load_network(
            **{
                'arch': self.arch,
                'datasource': self.datasource,
                'init_w': self.init_w,
                'init_b': self.init_b,
            })

        # Input nodes
        self.inputs = net.inputs
        self.compress = tf.placeholder_with_default(False, [])
        self.accumulate_g = tf.placeholder_with_default(False, [])
        self.is_train = tf.placeholder_with_default(False, [])
        self.init = tf.placeholder_with_default(False, [])

        # For convenience
        prn_keys = [
            k for p in ['w', 'b'] for k in net.weights.keys() if p in k
        ]
        var_no_train = functools.partial(tf.Variable,
                                         trainable=False,
                                         dtype=tf.float32)
        self.weights = weights = net.weights

        # Initialize weights, if enabled.
        weights_reinit = tf.cond(
            self.init, lambda: reinitialize(weights, self.init_w, self.init_b),
            lambda: weights)
        with tf.control_dependencies(
            [tf.assign(weights[k], weights_reinit[k])
             for k in weights.keys()]):
            self.weights_init = {k: tf.identity(v) for k, v in weights.items()}

        # Pruning
        mask_init = {
            k: var_no_train(tf.ones(weights[k].shape))
            for k in prn_keys
        }
        mask_prev = {
            k: var_no_train(tf.ones(weights[k].shape))
            for k in prn_keys
        }
        g_mmt_prev = {
            k: var_no_train(tf.zeros(weights[k].shape))
            for k in prn_keys
        }
        cs_prev = {
            k: var_no_train(tf.zeros(weights[k].shape))
            for k in prn_keys
        }

        def update_g_mmt():
            w_mask = apply_mask(weights, mask_init)
            logits = net.forward_pass(w_mask,
                                      self.inputs['image'],
                                      self.is_train,
                                      trainable=False)
            loss = tf.reduce_mean(compute_loss(self.inputs['label'], logits))
            grads = tf.gradients(loss, [mask_init[k] for k in prn_keys])
            gradients = dict(zip(prn_keys, grads))
            g_mmt = {k: g_mmt_prev[k] + gradients[k] for k in prn_keys}
            return g_mmt

        g_mmt = tf.cond(self.accumulate_g, lambda: update_g_mmt(),
                        lambda: g_mmt_prev)

        def get_sparse_mask():
            cs = normalize_dict({k: tf.abs(g_mmt[k]) for k in prn_keys})
            return (create_sparse_mask(cs, self.target_sparsity), cs)

        with tf.control_dependencies(
            [tf.assign(g_mmt_prev[k], g_mmt[k]) for k in prn_keys]):
            mask, cs = tf.cond(self.compress, lambda: get_sparse_mask(),
                               lambda: (mask_prev, cs_prev))
        with tf.control_dependencies(
            [tf.assign(mask_prev[k], v) for k, v in mask.items()]):
            w_final = apply_mask(weights, mask)

        # Forward pass
        logits = net.forward_pass(w_final, self.inputs['image'], self.is_train)

        # Loss
        loss_emp = tf.reduce_mean(compute_loss(self.inputs['label'], logits))
        reg = 0.00025 * tf.reduce_sum(
            [tf.reduce_sum(tf.square(v)) for v in w_final.values()])
        loss_opt = loss_emp + reg

        # Optimization
        optim, learning_rate, global_step = prepare_optimization(
            self.optimizer, self.learning_rate, self.decay_type,
            self.decay_boundaries, self.decay_values)
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            self.train_op = optim.minimize(loss_opt, global_step=global_step)

        # Outputs
        output_class = tf.argmax(logits, axis=1, output_type=tf.int32)
        output_correct_prediction = tf.equal(self.inputs['label'],
                                             output_class)
        output_accuracy_individual = tf.cast(output_correct_prediction,
                                             tf.float32)
        output_accuracy = tf.reduce_mean(output_accuracy_individual)
        self.outputs = {
            'los': loss_opt,
            'acc': output_accuracy,
            'acc_individual': output_accuracy_individual,
        }

        # Sparsity
        self.sparsity = compute_sparsity(w_final, prn_keys)

        # Approximate dynamical isometry
        self.isometry_penalty_op = compute_isometry(w_final)

        # Jacobian singular values
        self.jsv = compute_jsv(self.inputs['image'], logits)
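The pruning path above accumulates gradients with respect to the masks, normalizes their absolute values into connection-sensitivity scores, and then create_sparse_mask keeps only the most sensitive connections at the requested target_sparsity. As a point of reference, a minimal NumPy sketch of that global top-k masking idea follows; the function name, the rounding, and the tie handling at the threshold are assumptions of this sketch, not the repository's implementation.

import numpy as np


def sparse_mask_sketch(cs, target_sparsity):
    """Keep the (1 - target_sparsity) fraction of entries with the largest scores.

    cs maps each prunable tensor name to an array of sensitivity scores.
    """
    flat = np.concatenate([v.ravel() for v in cs.values()])
    num_keep = int(round(flat.size * (1.0 - target_sparsity)))
    if num_keep <= 0:
        return {k: np.zeros_like(v, dtype=np.float32) for k, v in cs.items()}
    # Global threshold: the num_keep-th largest score across all tensors.
    threshold = np.sort(flat)[-num_keep]
    # Entries tied with the threshold are kept, so the mask may retain slightly
    # more than num_keep connections.
    return {k: (v >= threshold).astype(np.float32) for k, v in cs.items()}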
Example #9
import json
import os

import data
import network

# validate and convert the image file
image_filename = args.input
if not os.path.isfile(image_filename):
    print('Unable to find image file', image_filename)
    exit()

# BUGBUG: useful for testing, but probably don't want to assume the
# image is from the image dataset
image_category = image_filename.split(os.sep)[-2]

image_data = data.process_image(args.input)

# create the network
device = 'cuda' if args.gpu else 'cpu'
model = network.load_network(args.checkpoint, device)

# predict
probs, classes = network.predict(image_data, model, device, topk=args.top_k)

# Load the category to name mapping if provided
cat_to_name = None
if args.category_names and os.path.isfile(args.category_names):
    with open(args.category_names, 'r') as f:
        cat_to_name = json.load(f)

# output results
print('Image category:', image_category)
if cat_to_name:
    print('Image name:', cat_to_name[image_category])
print('Probabilities:', probs)
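The fragment above reads args.input, args.checkpoint, args.gpu, args.top_k, and args.category_names from an argument parser that is not shown (the top of the script is cut off). A minimal argparse sketch consistent with those attribute names follows; the flag spellings, help texts, and defaults are assumptions, not the original script's parser.

import argparse

parser = argparse.ArgumentParser(description='Predict the class of an image with a trained network.')
parser.add_argument('input', help='path to the image file')
parser.add_argument('checkpoint', help='path to the saved model checkpoint')
parser.add_argument('--top_k', type=int, default=5, help='number of top classes to report')
parser.add_argument('--category_names', help='JSON file mapping categories to display names')
parser.add_argument('--gpu', action='store_true', help='run inference on the GPU')
args = parser.parse_args()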
Example #10
    def construct_model(self):
        # Base-learner
        self.net = net = load_network(
            self.datasource,
            self.arch,
            self.num_classes,
            # bp: before pruning
            self.initializer_w_bp,
            self.initializer_b_bp,
            # ap: after pruning
            self.initializer_w_ap,
            self.initializer_b_ap,
        )

        print('network number of params: ', net.num_params)

        # Input nodes
        self.inputs = net.inputs

        # These placeholders control how the model is run
        self.compress = tf.placeholder_with_default(False, [])
        self.is_train = tf.placeholder_with_default(False, [])
        self.pruned = tf.placeholder_with_default(False, [])

        # Switch for weights to use (before or after pruning) + improvement
        # weights = tf.cond(self.pruned, lambda: net.weights_ap, lambda: net.weights_bp)
        weights = net.weights_ap

        # For convenience + improvement
        # e.g., ['w1', 'w2', 'w3', 'w4', 'b1', 'b2', 'b3', 'b4']
        # prn_keys = [k for p in ['w', 'u', 'b'] for k in weights.keys() if p in k]
        prn_keys = [k for p in ['w', 'u'] for k in weights.keys() if p in k]
        print("prn_keys: ", prn_keys)
        # Create partial function
        # https://docs.python.org/2/library/functools.html#functools.partial
        var_no_train = functools.partial(tf.Variable,
                                         trainable=False,
                                         dtype=tf.float32)

        # Model
        mask_init = {
            k: var_no_train(tf.ones(weights[k].shape))
            for k in prn_keys
        }
        mask_prev = {
            k: var_no_train(tf.ones(weights[k].shape))
            for k in prn_keys
        }

        def get_sparse_mask():
            w_mask = apply_mask(weights, mask_init)
            logits = net.forward_pass(w_mask,
                                      self.inputs['input'],
                                      self.is_train,
                                      trainable=False)
            loss = tf.reduce_mean(compute_loss(self.inputs['label'], logits))
            grads = tf.gradients(loss, [mask_init[k] for k in prn_keys])
            # Map keys and gradients
            gradients = dict(zip(prn_keys, grads))

            # For improvement
            rescaled_grad = {}
            for k in prn_keys:
                norm_of_weight = tf.norm(w_mask[k], ord=2)
                norm_of_grad = tf.norm(gradients[k], ord=2)
                rescaled_grad[k] = gradients[k] * (norm_of_grad /
                                                   (norm_of_weight + 1e-16))
            gradients = rescaled_grad

            # Calculate connection sensitivity
            cs = normalize_dict({k: tf.abs(v) for k, v in gradients.items()})

            return create_sparse_mask(cs, self.target_sparsity)

        mask = tf.cond(self.compress, lambda: get_sparse_mask(),
                       lambda: mask_prev)
        # Update `mask_prev` with `mask`
        # To mark dependencies, use `control_dependencies` method
        with tf.control_dependencies(
            [tf.assign(mask_prev[k], v) for k, v in mask.items()]):
            w_final = apply_mask(weights, mask)

        # For weight visualization
        # pruned_weight1 = tf.reduce_mean(mask['w1'], axis=[1], keepdims=True)
        # scaled_pruned_weight1 = pruned_weight1 * 255
        # scaled_pruned_weight1 = tf.math.round(scaled_pruned_weight1)
        # weight1_for_visualization = tf.reshape(scaled_pruned_weight1, [28, 28])
        # weight1_for_visualization = tf.cast(weight1_for_visualization, tf.uint8)

        # Forward pass
        logits = net.forward_pass(w_final, self.inputs['input'], self.is_train)

        # Loss
        opt_loss = tf.reduce_mean(compute_loss(self.inputs['label'], logits))
        reg = self.weight_decay * tf.reduce_sum(
            [tf.reduce_sum(tf.square(v)) for v in w_final.values()])
        opt_loss = opt_loss + reg

        # Optimization
        optim, lr, global_step = prepare_optimization(
            opt_loss, self.optimizer, self.lr_decay_type, self.lr,
            self.decay_boundaries, self.decay_values, self.decay_steps,
            self.end_learning_rate, self.power)
        update_ops = tf.get_collection(
            tf.GraphKeys.UPDATE_OPS)  # TF version issue
        with tf.control_dependencies(update_ops):
            self.train_op = optim.minimize(opt_loss, global_step=global_step)

        # Outputs
        output_class = tf.argmax(logits, axis=1, output_type=tf.int32)
        output_correct_prediction = tf.equal(self.inputs['label'],
                                             output_class)
        output_accuracy_individual = tf.cast(output_correct_prediction,
                                             tf.float32)
        output_accuracy = tf.reduce_mean(output_accuracy_individual)
        self.outputs = {
            'logits': logits,
            'los': opt_loss,
            'acc': output_accuracy,
            'acc_individual': output_accuracy_individual,
            # 'weight1_for_visualization': weight1_for_visualization,
            'lr': lr,
            'mask': mask,
        }
        self.sparsity = compute_sparsity(w_final, prn_keys)

        # Summaries
        tf.summary.scalar('loss', opt_loss)
        tf.summary.scalar('accuracy', output_accuracy)
        tf.summary.scalar('lr', lr)
        self.summ_op = tf.summary.merge(
            tf.get_collection(tf.GraphKeys.SUMMARIES))
Example #11
from network import load_network, create_graph
from heuristics import heuristic1, heuristic2
from hill_climbing import hill_climbing, find_k
from search import search

print("Aguarde enquanto os dados sao carregados na memoria...")

#Carrega o grafo na memoria caso o grafo ja exista e cria o grafo a partir dos arquivos originais caso ele
#nao seja encontrado
try:
    network = load_network()
except FileNotFoundError:
    create_graph()
    network = load_network()

k = int(input("Enter the value of k (total number of influential nodes): "))

print("Procurando os k mais influentes com a heuristica 1...")
l = find_k(network, k, heuristic1
           )  #chama a função para a descoberta dos k nos usando a heuristica 1
print(
    "K mais influentes encontrados.\nVerificando a popularidade dos k nos...")
r = search(
    network, l, 2
)  #busca exata para validar a popularidade dos k obtidos atraves da busca acima
print("Resultados da busca com H1:\n")
for x in range(len(l)):
    print("{} com valor de heuristica: {:.2f} e alcance de popularidade: {}".
          format(l[x], heuristic1(network[l[x]]), r[x]))
print()
Example #12
import mnist_loader
import network

# unpack mnist data
training_data, validation_data, test_data = mnist_loader.load_data_wrapper()

# build a [784, 100, 30, 10] network
net = network.Network([784, 100, 30, 10])

# training
# SGD(training_data, epochs, mini_batch_size, eta, test_data=None)
net.SGD(training_data, 100, 10, 3, test_data=test_data)

# save and load test
network.save_network(net, "trained.npy")
b, w, mmax = network.load_network("trained.npy")

# checking
print("biases")
for line in b:
    print(line.shape)

print("weight")
for line in w:
    print(line.shape)

print("max : {}".format(mmax))
Example #13
    def construct_model(self):
        # Base-learner
        self.net = net = load_network(
            self.datasource, self.arch, self.num_classes,
            self.initializer_w_bp, self.initializer_b_bp,
            self.initializer_w_ap, self.initializer_b_ap,
        )

        # Input nodes
        self.inputs = net.inputs
        self.compress = tf.placeholder_with_default(False, [])
        self.is_train = tf.placeholder_with_default(False, [])
        self.pruned = tf.placeholder_with_default(False, [])

        # Switch for weights to use (before or after pruning)
        weights = tf.cond(self.pruned, lambda: net.weights_ap, lambda: net.weights_bp)

        # For convenience
        prn_keys = [k for p in ['w', 'b'] for k in weights.keys() if p in k]
        # print('prn_keys:',prn_keys)
        var_no_train = functools.partial(tf.Variable, trainable=False, dtype=tf.float32)

        # Model
        mask_init = {k: var_no_train(tf.ones(weights[k].shape)) for k in prn_keys}
        # print('mask_init',mask_init)
        mask_prev = {k: var_no_train(tf.ones(weights[k].shape)) for k in prn_keys}
        # print('mask_prev',mask_prev)

        def get_sparse_mask():  # get the sparse mask
            w_mask = apply_mask(weights, mask_init)
            logits = net.forward_pass(w_mask, self.inputs['input'],
                self.is_train, trainable=False)
            loss = tf.reduce_mean(compute_loss(self.inputs['label'], logits))
            grads = tf.gradients(loss, [mask_init[k] for k in prn_keys])
            gradients = dict(zip(prn_keys, grads))
            cs = normalize_dict({k: tf.abs(v) for k, v in gradients.items()})
            return create_sparse_mask(cs, self.target_sparsity)

        mask = tf.cond(self.compress, lambda: get_sparse_mask(), lambda: mask_prev)
        print("mask:",mask)
        with tf.control_dependencies([tf.assign(mask_prev[k], v) for k,v in mask.items()]):
            w_final = apply_mask(weights, mask)

        # Forward pass
        logits = net.forward_pass(w_final, self.inputs['input'], self.is_train)  # forward-pass output

        # Loss
        opt_loss = tf.reduce_mean(compute_loss(self.inputs['label'], logits))
        reg = 0.00025 * tf.reduce_sum([tf.reduce_sum(tf.square(v)) for v in w_final.values()])
        opt_loss = opt_loss + reg

        # Optimization
        optim, lr, global_step = prepare_optimization(opt_loss, self.optimizer, self.lr_decay_type,
            self.lr, self.decay_boundaries, self.decay_values)
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) # TF version issue
        with tf.control_dependencies(update_ops):
            self.train_op = optim.minimize(opt_loss, global_step=global_step)

        # Outputs
        output_class = tf.argmax(logits, axis=1, output_type=tf.int32)
        output_correct_prediction = tf.equal(self.inputs['label'], output_class)
        output_accuracy_individual = tf.cast(output_correct_prediction, tf.float32)
        output_accuracy = tf.reduce_mean(output_accuracy_individual)
        self.outputs = {
            'logits': logits,
            'los': opt_loss,
            'acc': output_accuracy,
            'acc_individual': output_accuracy_individual,
        }
        self.sparsity = compute_sparsity(w_final, prn_keys)

        # Summaries
        tf.summary.scalar('loss', opt_loss)
        tf.summary.scalar('accuracy', output_accuracy)
        tf.summary.scalar('lr', lr)
        self.summ_op = tf.summary.merge(tf.get_collection(tf.GraphKeys.SUMMARIES))