Esempio n. 1
0
 def onStart(self):
     """Build the application's model objects and register every form."""
     # Constants offered as choices by the edit forms.
     self.begin_at = 25
     self.bootproto = ["static", "dhcp"]
     self.teaming = ['yes', 'no']

     # Model objects that the forms below edit.
     self.host = classes.Host()
     self.network_pxe = classes.PXENetwork()
     self.network_cluster = classes.ClusterNetwork()
     self.network_trust = classes.Network()
     self.network_untrust = classes.Network()
     self.network_passive = classes.Network()
     self.storage_os = classes.Storage(mountpoint="/")
     self.storage_fast = classes.Storage(mountpoint="/var/EDCOP/fast")
     self.storage_bulk = classes.Storage(mountpoint="/var/EDCOP/bulk")
     self.storage_shared = classes.Storage(mountpoint="/var/EDCOP/shared")

     # (form id, form class, extra keyword arguments) — registered in order.
     form_specs = [
         ("MAIN", MainForm, {}),
         ("HOSTNAME", HostEditForm, {}),
         ("NETWORKSELECT", NetworkSelectForm, {}),
         ("NETWORKPXE", PXENetForm, {}),
         ("NETWORKCLUSTER", ClusterNetForm, {}),
         ("NETWORKTRUST", NetworkEditForm,
          {"network": self.network_trust, "name": "Trust (LAN)"}),
         ("NETWORKUNTRUST", NetworkEditForm,
          {"network": self.network_untrust, "name": "Untrust (WAN)"}),
         ("NETWORKPASSIVE", NetworkEditForm,
          {"network": self.network_passive, "name": "Passive"}),
         ("STORAGESELECT", StorageSelectForm, {}),
         ("STORAGEOS", StorageEditForm,
          {"storage": self.storage_os, "name": "EDCOP OS"}),
         ("STORAGEFAST", StorageEditForm,
          {"storage": self.storage_fast, "name": "Fast"}),
         ("STORAGEBULK", StorageEditForm,
          {"storage": self.storage_bulk, "name": "Bulk"}),
         ("STORAGESHARED", StorageEditForm,
          {"storage": self.storage_shared, "name": "Shared"}),
     ]
     for form_id, form_cls, extra in form_specs:
         self.addForm(form_id, form_cls, **extra)
Esempio n. 2
0
def main():
    """Command-line driver (Python 2): parse options, build the network
    described by a JSON file, and run the discrete-event simulation.

    Expects the JSON document to contain the top-level keys 'hosts',
    'routers', 'links' and 'flows' (each a mapping from name to a dict
    of attributes — see the parsing loops below for the fields read).
    """
    # ------------------------- CLI definition -------------------------
    parser = argparse.ArgumentParser(
        description='Run simulation on JSON file.')

    parser.add_argument('--json',
                        '-j',
                        action='store',
                        dest='json_file_name',
                        help='Use network stored in json file',
                        required=True)

    #option for tcp reno or tcp fast
    # Exactly one of --Reno / --FAST is required; both store into args.tcp_type.
    tcp_type = parser.add_mutually_exclusive_group(required=True)
    tcp_type.add_argument('--Reno',
                          dest='tcp_type',
                          action='store_const',
                          const='Reno',
                          help='Use the TCP-Reno congestion control algorithm')

    tcp_type.add_argument("--FAST",
                          dest='tcp_type',
                          action='store_const',
                          const='FAST',
                          help='Use the TCP-FAST congestion control algorithm')

    # options for graphing metrics
    metrics = parser.add_argument_group()
    metrics.add_argument('-m',
                         dest='metrics',
                         action='store_true',
                         help='Print graphs for metrics.\
                    Requires the following subarguments:')

    # --more / --less / --avg are mutually exclusive; all store into args.log.
    metricType = metrics.add_mutually_exclusive_group()

    metricType.add_argument('--more',
                            dest='log',
                            action='store_const',
                            const='more',
                            help='Prints a timetrace from collecting\
            all data.\
            Requires the -m argument.')

    metricType.add_argument('--less',
                            dest='log',
                            action='store_const',
                            const='less',
                            help='Prints a timetrace from collecting\
            a single datum per discrete time interval. \
            Subargument for the -m argument.')

    metricType.add_argument('--avg',
                            dest='log',
                            action='store_const',
                            const='avg',
                            help='Prints an approximate (average) timetrace\
            by collecting data over a discrete time interval. \
            Subargument for the -m argument.')

    metrics.add_argument('-l',
                         '--links',
                         nargs='+',
                         type=str,
                         action='store',
                         dest='links',
                         metavar='LinkID',
                         help='Specify which\
            links are to be logged. LinkID must given in the form\
            \'L1\', \'L2\', etc. Subargument for the -m argument.')

    metrics.add_argument('-f',
                         '--flows',
                         nargs='+',
                         type=str,
                         action='store',
                         dest='flows',
                         metavar='FlowID',
                         help='Specify which\
            flows are to be logged. FlowID must given in the form\
            \'F1\', \'F2\', etc. Subargument for the -m argument.')

    parser.add_argument('-v',
                        action='store_true',
                        dest='verbose',
                        help='verbose: prints out information about events,\
            event time, and number of elements in priority queue')

    args = parser.parse_args()
    # ------------------- argument cross-validation --------------------
    # All subargs must be present if --m is invoked
    if not args.metrics and (args.log is not None or args.links is not None
                             or args.flows is not None):
        parser.print_usage()
        print "Error: -m argument is required."
        return
    # All subargs must be present if --m is invoked
    elif args.metrics and (args.log is None or args.links is None
                           or args.flows is None):
        parser.print_usage()
        print "Error: All of --m's subargments required."
        return

    # NOTE(review): this file handle is never closed; a with-block would be safer.
    f = open(args.json_file_name)

    parsed_data = json.loads(f.read())
    if args.verbose:
        print "JSON DATA:"
        pprint.pprint(parsed_data)

    # Name -> object maps for everything built from the JSON description.
    devices = {}
    links = {}
    flows = {}

    print "\n\n"

    # Parse json data into data structures
    print "Iterating over hosts:"
    for host_name in parsed_data['hosts']:
        print "Host ", host_name, "has data: ", parsed_data['hosts'][host_name]
        host = classes.Host(str(host_name))
        devices[str(host_name)] = host

    print "Iterating over routers:"
    for router_name in parsed_data['routers']:
        print "Router ", router_name, "has data: ", parsed_data['routers'][
            router_name]
        router = classes.Router(str(router_name))
        devices[str(router_name)] = router
    print "Hosts and routers instantiated. ", "\n\n"

    print "Iterating over links and adding to hosts/routers:"
    for link_name in parsed_data['links']:
        link_data = parsed_data['links'][link_name]
        print "Link ", link_name, "has data: ", link_data

        # Each link references its two endpoint devices by name.
        link = classes.Link(str(link_name), link_data['link_rate'],
                            link_data['link_delay'], link_data['link_buffer'],
                            devices[link_data['devices'][0]],
                            devices[link_data['devices'][1]])
        links[str(link_name)] = link
    print "Links instantiated.", "\n\n"

    print "Iterating over flows:"
    for flow_name in parsed_data['flows']:
        flow_data = parsed_data['flows'][flow_name]
        print "Flow ", flow_name, "has data: ", flow_data

        flow = classes.Flow(str(flow_name), devices[flow_data['flow_src']],
                            devices[flow_data['flow_dest']],
                            flow_data['data_amt'], flow_data['flow_start'],
                            flow_data['theoRTT'])
        flows[str(flow_name)] = flow
    print "Flows instantiated.", "\n\n"

    # Verifying metric inputs from command line are correct
    if args.metrics:
        for flowID in args.flows:
            if flowID not in flows.keys():
                print "Bad flowID in argument list."
                return
        for linkID in args.links:
            if linkID not in links.keys():
                print "Bad linkID in argument list."
                return

    network = classes.Network(devices, links, flows)
    met = None
    if args.metrics:
        met = m.Metrics(args.log, args.flows, args.links)
    simulator = simulation.Simulator(network, args.tcp_type, met)

    # Generate initial routing table
    print "Running..."
    if args.verbose:
        print "Static routing:"

    # Drain whatever events static routing queued before any flow starts.
    simulator.staticRouting()
    while not simulator.q.empty():
        result = simulator.processEvent()
        if args.verbose:
            print "processing one event\n" + str(result)

    if args.verbose:
        print "------------NETWORK------------"
        print "----------DEVICE DETAILS----------"
        for device_name in devices:
            print devices[device_name]

        print "----------LINK DETAILS----------"
        for link_name in links:
            print links[link_name]

        print "----------FLOW DETAILS----------"
        for flow_name in flows:
            print flows[flow_name]

        print "----------STARTING SIMULATION------------"

    # Flows begin:
    # Queue one INITIALIZEFLOW event per flow at that flow's start time.
    for flow_name in flows:
        flow = flows[flow_name]

        # NOTE(review): counter is assigned but never used.
        counter = 0
        timer = flow.flow_start

        newGenEvent = simulation.Event(None, None, "INITIALIZEFLOW", timer,
                                       flow)
        simulator.insertEvent(newGenEvent)

    # Schedule one dynamic re-routing event at REROUT_TIME.
    newDynamicRoutingEvent = simulation.Event(None, None, "REROUT",
                                              constants.REROUT_TIME, None)
    simulator.insertEvent(newDynamicRoutingEvent)

    # Main event loop: process events until the priority queue is empty.
    while not simulator.q.empty():
        result = simulator.processEvent()
        if args.verbose:
            print "QUEUE SIZE: " + str(
                simulator.q.qsize()) + "\n" + str(result)

    # Per-flow summary of data acknowledged vs. data generated.
    for flow_name in flows:
        flow = flows[flow_name]
        print "DATA ACKNOWLEDGED: " + str(flow.data_acknowledged)
        print "DATA MADE: " + str(flow.data_amt)

    # NOTE(review): [:-4] drops only four characters; for a '.json' name the
    # trailing '.' remains — confirm the intended display.
    print "Simulation for ", args.json_file_name[:
                                                 -4], args.tcp_type, args.log, " done!"
    simulator.done()
Esempio n. 3
0
            data = np.loadtxt("data/train.csv", delimiter=',', skiprows=1, usecols=)
            verify = np.loadtxt("data/test.csv", delimiter=',', skiprows=1, usecols=)
            X = data[:,0:9]
            Y = data[:,9:]
            X_ = verify[:,0:9]
            Y_ = verify[:,9:]
            network.fit(X_train, Y_train, X_, Y_)
        network.save('trained.h5')
    elif args[1] == '--predict' and len(args) == 3:
		# if args[2] == '1' or args[2] == '0':
		M = [args[2]]
		P = network.predict(M)
		print P
		# else:
			# print "Error"
			# return
    else:
		print "Error"
		return
    print "Arguments Processed!"

# Script entry point: build a classes.Network(9, 1) (presumably 9 inputs,
# 1 output — confirm against classes.Network), warm-start it from
# 'trained.h5' when present, then dispatch on the CLI arguments via run().
if __name__ == "__main__":
    if (len(sys.argv) > 1):
        network = classes.Network(9, 1)
        if os.path.exists('trained.h5'):
            network.load('trained.h5')
        run(sys.argv, network)
    else:
        # No arguments given: show usage (Python 2 print statement).
        print "Options: python run.py --train\n\
         python run.py --predict <values>"
Esempio n. 4
0
    if args[1] == '--train' and len(args) == 2:
        print "Training ..."
        for i in range(EPOCHS):
            (X, x, Y, y) = get_inputs('data/numbers.csv')
            network.fit(X, Y, x, y)
        network.save('trained.h5')
    elif args[1] == '--predict' and len(args) == 3:
        if args[2] == '1' or args[2] == '0':
            M = [float(args[2])]
            T = np.array([M])
            P = list(network.predict(T)[0])[0]
            print P
        else:
            print "Error"
            return
    else:
        print "Error"
        return
    print "Arguments Processed!"


# Script entry point: build a classes.Network(1, 1) (presumably 1 input,
# 1 output — confirm against classes.Network), warm-start it from
# 'trained.h5' when present, then dispatch on the CLI arguments via run().
if __name__ == "__main__":
    if (len(sys.argv) > 1):
        network = classes.Network(1, 1)
        if os.path.exists('trained.h5'):
            network.load('trained.h5')
        run(sys.argv, network)
    else:
        # No arguments given: show usage (Python 2 print statement).
        print "Options: python run.py --train\n\
         python run.py --predict <value>"
Esempio n. 5
0
			im = cv2.imread('data/images/8/61.png')
		else:
			print "Error"
			return
		grey = np.zeros((im.shape[0], im.shape[1]))
		for rownum in range(len(im)):
			for colnum in range(len(im[rownum])):
				grey[rownum][colnum] = average(im[rownum][colnum])
		P[0:28, 0:28] = grey
		P_ = np.reshape(P, (1, 1, 28, 28))
		result = network.predict(P_)[0]
		print result
#		for i in range(len(result)):
#			if result[i] == 1:
#				print i

	else:
		print 'Unknown command'
		return
	print "Arguments Processed!"

# Script entry point: build a classes.Network(10) (presumably 10 output
# classes — confirm against classes.Network), warm-start it from
# 'trained.h5' when present, then dispatch on the CLI arguments via run().
if __name__ == "__main__":
    if (len(sys.argv) > 1):
        network = classes.Network(10)
        if os.path.exists('trained.h5'):
            network.load('trained.h5')
        run(sys.argv, network)
    else:
        # No arguments given: show usage (Python 2 print statement).
        print "Options: python run.py --train\n\
         python run.py --predict <number>"
Esempio n. 6
0
def create_autoencoder(data, weight_range_l, weight_range_h, sig_choice, eta, autoencoder_loops):
	"""Interactively build, wire and train an autoencoder; return its encoder part.

	Layer sizes are read from stdin via input(), so this function is
	interactive.  The output layer is forced to the input width so the
	network learns to reproduce its input.  After training, the layers up
	to the first layer-size increase (the bottleneck) are packaged as the
	returned encoder network.

	Parameters:
		data              -- 2-D array-like of samples; normalized via helpers.normalize_2d
		weight_range_l/h  -- bounds for the uniform random initial synapse weights
		sig_choice        -- activation selector forwarded to Network.input_propagation
		eta               -- learning rate passed to Network.error_propagation
		autoencoder_loops -- number of passes over the (reshuffled) data set

	Returns:
		The encoder classes.Network, or None when no layer-size increase
		exists (previously this path raised UnboundLocalError).
	"""
	dataArray = classes.NN_Input(values=np.array(data))
	dataArray.values = helpers.normalize_2d(dataArray.values)
	data_length = dataArray.values.shape[1]

	# Create the network (list of layers which have a list of neurons).
	AutoEncoder = classes.Network(layers=[])
	nr_layers = int(input("-> AutoEncoder - Enter desired number of layers: "))
	for i in range(nr_layers):
		tmp_number = int(input("Enter desired number of neurons in layer {0}: ".format(i)))
		AutoEncoder.add_layer(classes.Layer(nr_neurons=tmp_number, neurons=[]))

	# The output layer: as many neurons as there are input values to reconstruct.
	AutoEncoder.add_layer(classes.Layer(nr_neurons=data_length, neurons=[]))

	# Wire the inputs into the first layer: each neuron gets one synapse per input value.
	InputList = []
	for i in range(data_length):
		InputList.append(classes.Input_Value())
	for i in range(len(AutoEncoder.layers[0].neurons)):	# Amount of neurons in the layer
		for j in range(data_length):	# Amount of inputs
			AutoEncoder.layers[0].neurons[i].add_input(classes.Synapse(s_out=AutoEncoder.layers[0].neurons[i], s_weight=random.uniform(weight_range_l, weight_range_h)))
			AutoEncoder.layers[0].neurons[i].inputs[j].input = InputList[j]

	# Wire the last hidden layer to the output layer.
	for i in range(len(AutoEncoder.layers[nr_layers-1].neurons)):		# Amount of neurons in the layer
		for j in range(data_length):	# Amount of outputs
			# Neuron <i> of the last AE layer has an output synapse <j> whose input is that neuron and output is neuron <j> of the output layer
			AutoEncoder.layers[nr_layers-1].neurons[i].add_output(classes.Synapse(s_in=AutoEncoder.layers[nr_layers-1].neurons[i], s_out=AutoEncoder.layers[nr_layers].neurons[j], s_weight=random.uniform(weight_range_l, weight_range_h)))
			# Neuron <j> of the output layer has an input synapse which is the output synapse <j> of the neuron <i> in the last NN layer
			AutoEncoder.layers[nr_layers].neurons[j].add_input(AutoEncoder.layers[nr_layers-1].neurons[i].outputs[j])

	# Fully connect consecutive hidden layers.
	for l in range(nr_layers-1):
		for i in range(len(AutoEncoder.layers[l].neurons)):
			for j in range(len(AutoEncoder.layers[l+1].neurons)):
				AutoEncoder.layers[l].neurons[i].add_output(classes.Synapse(s_in=AutoEncoder.layers[l].neurons[i], s_out=AutoEncoder.layers[l+1].neurons[j], s_weight=random.uniform(weight_range_l, weight_range_h)))
				AutoEncoder.layers[l+1].neurons[j].add_input(AutoEncoder.layers[l].neurons[i].outputs[j])

	# Training phase: each pass reshuffles the sample order in place.
	for m in range(autoencoder_loops):
		np.random.shuffle(dataArray.values)
		learning_rate = eta
		for n in range(dataArray.values.shape[0]):	# For each line of input data
			# Input propagation phase: load sample <n> into the input values.
			for i in range(data_length):
				InputList[i].y = dataArray.values[n, i]
			AutoEncoder.input_propagation(choice=sig_choice)

			# Error propagation phase.  The factor below is a no-op (1*x);
			# kept as the hook where a decay schedule could be plugged in.
			learning_rate = 1*learning_rate
			AutoEncoder.error_propagation(dataArray.values[n, :], learning_rate)

			# Collect reconstruction output during the final pass (debug aid).
			if m == autoencoder_loops-1:
				aeOut = []
				for i in range(len(AutoEncoder.layers[nr_layers].neurons)):
					aeOut.append(AutoEncoder.layers[nr_layers].neurons[i].y)
				# Printing results for loop <n>
				aeData = ["%.2f" % float(elem) for elem in dataArray.values[n]]
				aeOut = ["%.2f" % float(elem) for elem in aeOut]
				#print("Training phase {0}: Data {1}, Output {2}".format(n, aeData, aeOut))

			#AutoEncoder.debug() # Print out all values in the network (i.e. synapse weights)

	# Extract the encoder portion: all layers up to (and including) the
	# bottleneck, i.e. the layer right before the first size increase.
	Encoder = None	# Bug fix: defined even when no layer-size increase is found.
	for i in range(nr_layers-1):
		if len(AutoEncoder.layers[i].neurons) < len(AutoEncoder.layers[i+1].neurons):
			encoderLayers = []
			for j in range(i+1):
				encoderLayers.append(AutoEncoder.layers[j])
			# Strip outgoing synapses of the encoder's new final layer.
			for j in range(len(encoderLayers[-1].neurons)):
				encoderLayers[-1].neurons[j].outputs = []
			Encoder = classes.Network(layers=encoderLayers)
			break

	return Encoder
Esempio n. 7
0
def initialize_mlp(data_filename, weight_range_l, weight_range_h, sig_choice, eta, training_loops):
	"""Interactively build, wire and train a multilayer perceptron.

	Reads training data from an XLS file via xlsParser.read_xls, sizes the
	output layer to the number of distinct categories, asks for the hidden
	layer sizes on stdin via input() (so this function is interactive),
	wires all synapses with random.uniform(weight_range_l, weight_range_h)
	weights, and trains for training_loops passes.  Returns the trained
	classes.Network.
	"""

	# Read data
	#[tmpIn, tmpExp] = fileHandling.readData(data_filename)
	#categories = np.unique(tmpExp)
	# read_xls is expected to return (inputs, expected outputs, category list)
	# — confirm against xlsParser.
	[tmpIn, tmpExp, categories] = xlsParser.read_xls(data_filename)
	nr_categories = len(categories)
	trInput = classes.NN_Input(values=np.array(helpers.normalize_2d(tmpIn)))
	trExpected = classes.NN_Output(values=np.array(tmpExp))
	trOutput = classes.NN_Output(values=[])

	# Create the network (list of layers which have a list of neurons)
	NeuralNetwork = classes.Network(layers=[])
	nr_layers = int(input("-> Enter desired number of layers: "))
	for i in range(nr_layers):
		tmp_number = int(input("Enter desired number of neurons in layer {0}: ".format(i)))
		NeuralNetwork.add_layer(classes.Layer(nr_neurons=tmp_number, neurons=[]))

	# The output layer (as many neurons as there are possible outputs)
	NeuralNetwork.add_layer(classes.Layer(nr_neurons=nr_categories, neurons=[]))

	# Add connections into the network
	# Each first-layer neuron gets one input synapse per input column.
	InputList = []
	for i in range(trInput.values.shape[1]):
		InputList.append(classes.Input_Value())
	for i in range(len(NeuralNetwork.layers[0].neurons)):	# Amount of neurons in the layer
		for j in range(trInput.values.shape[1]):	# Amount of inputs going into the network
			NeuralNetwork.layers[0].neurons[i].add_input(classes.Synapse(s_out=NeuralNetwork.layers[0].neurons[i], s_weight=random.uniform(weight_range_l, weight_range_h)))
			NeuralNetwork.layers[0].neurons[i].inputs[j].input = InputList[j]

	# Add connections out of the network
	for i in range(len(NeuralNetwork.layers[nr_layers-1].neurons)):		# Amount of neurons in the layer
		for j in range(nr_categories):	# Amount of output categories to classify into
			# Neuron <i> of the last NN layer has an output synapse <j> whose input is that neuron and output is neuron <j> of the output layer
			NeuralNetwork.layers[nr_layers-1].neurons[i].add_output(classes.Synapse(s_in=NeuralNetwork.layers[nr_layers-1].neurons[i], s_out=NeuralNetwork.layers[nr_layers].neurons[j], s_weight=random.uniform(weight_range_l, weight_range_h)))
			# Neuron <j> of the output layer has an input synapse which is the output synpase <j> of the neuron <i> in the last NN layer
			NeuralNetwork.layers[nr_layers].neurons[j].add_input(NeuralNetwork.layers[nr_layers-1].neurons[i].outputs[j])

	# Add connections within the network
	# Fully connect each hidden layer to the next one.
	for l in range(nr_layers-1):
		for i in range(len(NeuralNetwork.layers[l].neurons)):
			for j in range(len(NeuralNetwork.layers[l+1].neurons)):
				NeuralNetwork.layers[l].neurons[i].add_output(classes.Synapse(s_in=NeuralNetwork.layers[l].neurons[i], s_out=NeuralNetwork.layers[l+1].neurons[j], s_weight=random.uniform(weight_range_l, weight_range_h)))
				NeuralNetwork.layers[l+1].neurons[j].add_input(NeuralNetwork.layers[l].neurons[i].outputs[j])

	# Training phase
	for m in range(training_loops):
		for n in range(trInput.values.shape[0]): # For each line of input data
			# Input propagation phase
			for i in range(trInput.values.shape[1]): 	# Number of inputs
				InputList[i].y = trInput.values[n, i]			# Set input values
			NeuralNetwork.input_propagation(choice=sig_choice)

			# Error propagation phase
			NeuralNetwork.error_propagation(trExpected.values[n, :], eta)

			# Printing results for phase <n>
			# NOTE(review): nnIn is only used by the commented-out print below.
			nnIn = []
			nnExp = []
			nnOut = []
			for i in range(len(NeuralNetwork.layers[nr_layers].neurons)):
				nnExp.append(trExpected.values[n, i])
				nnOut.append(NeuralNetwork.layers[nr_layers].neurons[i].y)
			# Printing results for loop <n>
			nnIn.append(trInput.values[n])
			#nnIn = ["%.2f" % float(elem) for elem in nnIn]
			nnOut = ["%.2f" % float(elem) for elem in nnOut]
			# print("Training phase {0}: Input {1}, Expected {2}, Output {3}".format(n, nnIn, nnExp, nnOut))
			# NOTE(review): trOutput accumulates formatted strings but is never
			# returned or read after this loop.
			trOutput.values.append(nnOut)	# Adding results to output table

			# NeuralNetwork.debug() # Print out all values in the network (i.e. synapse weights)

	return NeuralNetwork