if args == 1:
    # starts master threads, spawns the relevant workers, and creates communicators from the masters to the workers
    comm_global = MPI.COMM_WORLD
    rank = comm_global.Get_rank()
    size = comm_global.Get_size()
    comm_children = MPI.COMM_WORLD.Spawn(sys.executable,
                                         args=[sys.argv[0], start_worker],
                                         maxprocs=n_workers * size)
    start = n_workers * rank
    end = start + n_workers - 1
    if rank == 0:
        # on the root rank, start a logger to save results
        f = l.DataLogger(
            "SPSA",
            n_layer,
            n_nodes,
            header=
            "Epoch,(cost,accuracy),Computation_Time,Train_Accuracy,Test_Accuracy",
            testing=True)
else:
    # starts the communicator for a worker process
    comm_global = MPI.COMM_WORLD
    comm_parent = MPI.Comm.Get_parent()
    rank = comm_parent.Get_rank()
    size = comm_parent.Get_size()
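# A minimal sketch (an assumption, not part of the original program) of how each
# master could exchange work with the workers it spawned over the intercommunicator:
# ranks start..end in comm_children belong to this master, and the workers answer
# through comm_parent. The payloads and tag values below are placeholders.
if args == 1:
    for w in range(start, end + 1):
        comm_children.send({"theta": None}, dest=w, tag=0)      # push parameters to each owned worker
    results = [comm_children.recv(source=w, tag=1) for w in range(start, end + 1)]
else:
    status = MPI.Status()
    task = comm_parent.recv(source=MPI.ANY_SOURCE, tag=0, status=status)
    comm_parent.send({"loss": 0.0}, dest=status.Get_source(), tag=1)  # report back to the spawning master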


# Constructing the hidden and output layers
def multilayer_perceptron():
    prevlayer = x
    for w, b in zip(weights, biases):
        # assumed completion of the truncated body: sigmoid layer over W*prev + b
        prevlayer = tf.nn.sigmoid(tf.add(tf.matmul(prevlayer, w), b))
    return prevlayer
Example #2
from os import listdir
from os.path import isfile, join
import cPickle
import numpy as np
from mpi4py import MPI
import time
from sys import path
from os import getcwd
p = getcwd()[0:getcwd().rfind("/")]+"/SGD"
path.append(p)


p = getcwd()[0:getcwd().rfind("/")]+"/Logger"
path.append(p)
import Logger
logfile = Logger.DataLogger("SVHN_LBFGS","Epoch,time,train_accuaracy,test_accuaracy,train_cost,test_cost")
batch_size = 100

NUM_CLASSES = 10
from cifar10 import read_data_sets

from ParamServer import ParamServer
from ModelReplica import DPSGD


comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
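# Illustrative sketch (an assumption): with the ParamServer/DPSGD split imported
# above, rank 0 usually acts as the parameter server while the remaining ranks run
# model replicas that pull parameters and push gradients. The plain mpi4py exchange
# below shows one pull round of that pattern; the PULL tag and None payloads are
# placeholders, not the actual ParamServer/ModelReplica API of this project.
PULL = 0
if rank == 0:
    theta = None                                   # current global parameters
    for _ in range(size - 1):                      # answer one request per replica
        status = MPI.Status()
        comm.recv(source=MPI.ANY_SOURCE, tag=PULL, status=status)
        comm.send(theta, dest=status.Get_source(), tag=PULL)
else:
    comm.send(None, dest=0, tag=PULL)              # request the current parameters
    theta = comm.recv(source=0, tag=PULL)
# Gradient pushes from the replicas would follow the same pattern with a second tag.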

# Image processing for training the network. Note the many random
# distortions applied to the image.
Example #3
p = getcwd()[0:getcwd().rfind("/")] + "/MCMC"
path.append(p)

from Multi_try_Metropolis import MCMC
from cifar10 import read_data_sets

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()

NUM_CLASSES = 10
p = getcwd()[0:getcwd().rfind("/")] + "/Logger"
path.append(p)
import Logger
logfile = Logger.DataLogger(
    "CIFAR10_MCMC",
    "Epoch,time,train_accuaracy,test_accuaracy,train_cost,test_cost")

# Image processing for training the network. Note the many random
# distortions applied to the image.

# Randomly crop a [height, width] section of the image.
#distorted_image = tf.random_crop(reshaped_image, [total_size, height, height,3])
# test_data = tf.random_crop(test_data, [total_size, height, height,3])
# # Randomly flip the image horizontally.
# distorted_image = tf.image.random_flip_left_right(distorted_image)

# # Because these operations are not commutative, consider randomizing
# # the order of their operations.
# distorted_image = tf.image.random_brightness(distorted_image,
#                                            max_delta=63)
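# Sketch (an assumption, and assuming `import tensorflow as tf` as in the other
# examples): the distortions described above wired into one per-image function,
# as in the TF CIFAR-10 tutorial; `distorted_image` refers to the 4-D tensor
# produced by the commented-out crop.
def _distort(img):
    img = tf.image.random_flip_left_right(img)
    img = tf.image.random_brightness(img, max_delta=63)
    return tf.image.per_image_standardization(img)
# train_images = tf.map_fn(_distort, distorted_image)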
Example #4
import time
from sys import path
from os import getcwd

from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
from mpi4py import MPI

p = getcwd()[0:getcwd().rfind("/")]+"/SGD"
path.append(p)

p = getcwd()[0:getcwd().rfind("/")]+"/Logger"
path.append(p)
import Logger
logfile = Logger.DataLogger("MNIST_SGD","Epoch,time,train_accuaracy,test_accuaracy,train_cost,test_cost")

from ParamServer import ParamServer
from ModelReplica import DPSGD




comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
# Parameters
learning_rate = 0.001
training_epochs = 100
batch_size = 100
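# Sketch (an assumption): with these parameters each epoch visits the training set
# once in minibatches; in the data-parallel setup above, every rank other than the
# parameter server would draw its own minibatches like this.
batches_per_epoch = mnist.train.num_examples // batch_size
batch_x, batch_y = mnist.train.next_batch(batch_size)   # one minibatch of images and labels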