Example #1
from collections import defaultdict

import numpy as np

from network import Node, Network


def simulate_transactions(node1, node2, num=3):
    """ simulating nodes using the tokens """
    print('simulating {} transactions'.format(num))
    return [
        node1.make_transaction(node2, float(np.random.randint(10)))
        for _ in range(num)
    ]


if __name__ == '__main__':
    OVERDRAFT_LIMIT = -10

    net = Network([Node('node'), Node('other')],
                  overdraft_limit=OVERDRAFT_LIMIT)

    balances = defaultdict(int)
    for _ in range(3):
        #  simulate a few transactions between nodes
        new_transactions = simulate_transactions(net[0], net[1])

        #  check the transactions are valid
        balances, transactions = net.validate_transactions(
            balances, new_transactions)

        new_proof = net.proof_of_work()
        #  randomly select a miner
        miner = net[np.random.randint(len(net))]

        #  miner adds the block to its chain

Example #2
def train():
    x = tf.placeholder(tf.float32, [BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, 3])
    mask = tf.placeholder(tf.float32, [BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, 1])
    local_x = tf.placeholder(tf.float32,
                             [BATCH_SIZE, LOCAL_SIZE, LOCAL_SIZE, 3])
    global_completion = tf.placeholder(tf.float32,
                                       [BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, 3])
    local_completion = tf.placeholder(tf.float32,
                                      [BATCH_SIZE, LOCAL_SIZE, LOCAL_SIZE, 3])
    is_training = tf.placeholder(tf.bool, [])

    model = Network(x,
                    mask,
                    local_x,
                    global_completion,
                    local_completion,
                    is_training,
                    batch_size=BATCH_SIZE)
    sess = tf.Session()
    global_step = tf.Variable(0, name='global_step', trainable=False)
    epoch = tf.Variable(0, name='epoch', trainable=False)

    opt = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE)
    g_train_op = opt.minimize(model.g_loss,
                              global_step=global_step,
                              var_list=model.g_variables)
    d_train_op = opt.minimize(model.d_loss,
                              global_step=global_step,
                              var_list=model.d_variables)

    init_op = tf.global_variables_initializer()
    sess.run(init_op)

    if tf.train.get_checkpoint_state('./backup'):
        saver = tf.train.Saver()
        saver.restore(sess, './backup/latest')

    x_train, x_test = load.load()
    x_train = np.array([a / 255.0 - datamean for a in x_train])
    x_test = np.array([a / 255.0 - datamean for a in x_test])

    step_num = int(len(x_train) / BATCH_SIZE)

    while True:
        sess.run(tf.assign(epoch, tf.add(epoch, 1)))
        print('epoch: {}'.format(sess.run(epoch)))

        np.random.shuffle(x_train)
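
        # Training proceeds in three phases keyed off the epoch counter:
        #   1. epochs 1..100: update only the discriminator, on completions
        #      produced by the current generator,
        #   2. epochs 101..PRETRAIN_EPOCH + 100: update only the completion
        #      (generator) network on its reconstruction loss,
        #   3. afterwards: alternate generator and discriminator updates.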

        # Discriminator pretrain
        if sess.run(epoch) <= 100:
            d_loss_value = 0
            for i in tqdm.tqdm(range(step_num)):
                x_batch = x_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]
                points_batch, mask_batch = get_points()

                completion = sess.run(model.completion,
                                      feed_dict={
                                          x: x_batch,
                                          mask: mask_batch,
                                          is_training: False
                                      })

                local_x_batch = []
                local_completion_batch = []
                for j in range(BATCH_SIZE):  # j, not i: keep the outer step index intact
                    x1, y1, x2, y2 = points_batch[j]
                    local_x_batch.append(x_batch[j][y1:y2, x1:x2, :])
                    local_completion_batch.append(completion[j][y1:y2, x1:x2, :])
                local_x_batch = np.array(local_x_batch)
                local_completion_batch = np.array(local_completion_batch)

                _, d_loss = sess.run(
                    [d_train_op, model.d_loss],
                    feed_dict={
                        x: x_batch,
                        mask: mask_batch,
                        local_x: local_x_batch,
                        global_completion: completion,
                        local_completion: local_completion_batch,
                        is_training: True
                    })
                d_loss_value += d_loss

            print('Discriminator loss: {}'.format(d_loss_value))

            np.random.shuffle(x_test)
            x_batch = x_test[:BATCH_SIZE]
            completion = sess.run(model.completion,
                                  feed_dict={
                                      x: x_batch,
                                      mask: mask_batch,
                                      is_training: False
                                  })
            sample = np.array((completion[0] + datamean) * 255.0,
                              dtype=np.uint8)
            cv2.imwrite(
                './output/{}.jpg'.format("{0:06d}".format(sess.run(epoch))),
                cv2.cvtColor(sample, cv2.COLOR_RGB2BGR))

            saver = tf.train.Saver()
            saver.save(sess, './backup/latest', write_meta_graph=False)

        # Completion
        elif sess.run(epoch) <= PRETRAIN_EPOCH + 100:
            g_loss_value = 0
            for i in tqdm.tqdm(range(step_num)):
                x_batch = x_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]
                points_batch, mask_batch = get_points()

                _, g_loss = sess.run([g_train_op, model.g_loss],
                                     feed_dict={
                                         x: x_batch,
                                         mask: mask_batch,
                                         is_training: True
                                     })
                g_loss_value += g_loss

            print('Completion loss: {}'.format(g_loss_value))

            np.random.shuffle(x_test)
            x_batch = x_test[:BATCH_SIZE]
            completion = sess.run(model.completion,
                                  feed_dict={
                                      x: x_batch,
                                      mask: mask_batch,
                                      is_training: False
                                  })
            sample = np.array((completion[0] + datamean) * 255.0,
                              dtype=np.uint8)
            cv2.imwrite(
                './output/{}.jpg'.format("{0:06d}".format(sess.run(epoch))),
                cv2.cvtColor(sample, cv2.COLOR_RGB2BGR))

            saver = tf.train.Saver()
            saver.save(sess, './backup/latest', write_meta_graph=False)
            if sess.run(epoch) == PRETRAIN_EPOCH:
                saver.save(sess, './backup/pretrained', write_meta_graph=False)

        # Adversarial training (generator + discriminator)
        else:
            g_loss_value = 0
            d_loss_value = 0
            for i in tqdm.tqdm(range(step_num)):
                x_batch = x_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]
                points_batch, mask_batch = get_points()

                _, g_loss, completion = sess.run(
                    [g_train_op, model.g_loss, model.completion],
                    feed_dict={
                        x: x_batch,
                        mask: mask_batch,
                        is_training: True
                    })
                g_loss_value += g_loss

                local_x_batch = []
                local_completion_batch = []
                for j in range(BATCH_SIZE):  # j, not i: keep the outer step index intact
                    x1, y1, x2, y2 = points_batch[j]
                    local_x_batch.append(x_batch[j][y1:y2, x1:x2, :])
                    local_completion_batch.append(completion[j][y1:y2, x1:x2, :])
                local_x_batch = np.array(local_x_batch)
                local_completion_batch = np.array(local_completion_batch)

                _, d_loss = sess.run(
                    [d_train_op, model.d_loss],
                    feed_dict={
                        x: x_batch,
                        mask: mask_batch,
                        local_x: local_x_batch,
                        global_completion: completion,
                        local_completion: local_completion_batch,
                        is_training: True
                    })
                d_loss_value += d_loss

            print('Completion loss: {}'.format(g_loss_value))
            print('Discriminator loss: {}'.format(d_loss_value))

            np.random.shuffle(x_test)
            x_batch = x_test[:BATCH_SIZE]
            completion = sess.run(model.completion,
                                  feed_dict={
                                      x: x_batch,
                                      mask: mask_batch,
                                      is_training: False
                                  })
            sample = np.array((completion[0] + datamean) * 255.0,
                              dtype=np.uint8)
            cv2.imwrite(
                './output/{}.jpg'.format("{0:06d}".format(sess.run(epoch))),
                cv2.cvtColor(sample, cv2.COLOR_RGB2BGR))

            saver = tf.train.Saver()
            saver.save(sess, './backup/latest', write_meta_graph=False)
Example #3

import numpy as np
import torch


def softmax_sample(distribution, temperature: float):
    visits = [i[0] for i in distribution]
    actions = [i[1] for i in distribution]
    if temperature == 0:
        return actions[visits.index(max(visits))]
    elif temperature == 1:
        visits_sum = sum(visits)
        visits_prob = [i / visits_sum for i in visits]
        return np.random.choice(actions, p=visits_prob)
    elif temperature > 0 and temperature < 1:
        visits = [visit**(1 / temperature) for visit in visits]
        visits_sum = sum(visits)
        visits_prob = [i / visits_sum for i in visits]
        return np.random.choice(actions, p=visits_prob)
    else:
        raise NotImplementedError
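

# Minimal sketch of the three temperature regimes above (the visit counts are
# made up; only the (visits, action) tuple layout comes from softmax_sample):
_demo = [(10, 0), (30, 1), (60, 2)]
print(softmax_sample(_demo, 0))    # greedy: always action 2
print(softmax_sample(_demo, 1))    # proportional to raw visit counts
print(softmax_sample(_demo, 0.5))  # sharpened: weights are visits ** (1 / 0.5)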


if __name__ == '__main__':
    base_network = Network()
    base_network.load_state_dict(
        torch.load('./models/1000model.pth', map_location='cpu'))

    target_network = Network()
    target_network.load_state_dict(
        torch.load('./models/100000model.pth', map_location='cpu'))

    test(base_network, target_network)
Example #4
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    run = False
                    pygame.quit()

                if event.type == pygame.MOUSEBUTTONDOWN:
                    draw_board()
            pygame.display.update()
    else:
        print("could not connect")
    threaded_listen()


########################################################################################################################
pygame.init()
n = Network()
clientID = n.connect()
if n.connected:
    print('connected')
    n.send('join', 0)
    print('DEBUG: joined')
    # get initial values
    print('DEBUG: get mapPath')
    mapPath = n.send('get', 'mapPath')
    print(mapPath)
    print('DEBUG: get boats')
    boats = n.send('get', 'boats')
    print('DEBUG: get buoys')
    buoys = n.send('get', 'buoys')
    print('DEBUG: get wind')
    wind = n.send('get', 'wind')
Example #5
from flask import Flask, request, render_template
from flask_restplus import Resource, Api, fields
from flask_socketio import SocketIO, emit
import time
from flask_cors import CORS
from network import Network
import json
from multiprocessing import Process

app = Flask(__name__)
CORS(app)
api = Api(app)
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app)
net = Network('network.db')
startserver = time.time()
ns = api.namespace('todos', description='TODO operations')
net.delete_all_bots()


def f(delay):
    while True:
        current_time = time.time()
        current_bots_time = net.get_bots_time()
        print(current_bots_time)
        current_bots_ids = net.get_bots_id()
        print(current_bots_ids)
        for i in range(len(current_bots_time)):
            print(i)
            if ((current_time - current_bots_time[i]) >= delay):
Example #6
# This file runs the program; comments below explain how to change the parameters.

from network import Network
from greedy import Greedy
from annealing import Annealing

# Initializing an instance of the network class creates a new map with new
# cities. Change the N parameter to adjust how many cities appear; the
# annealer does better with fewer.

n = Network(N=15)

# Calls the greedy and annealer algorithms on the network created in the above block.
# Visualizes results for comparison.

print("Greedy")

g = Greedy(n)
g.run()
n.draw_route(g.route, text=" - Greedy")

print()
print("Annealing")

a = Annealing(
    n, T=50,
    c=0.9995)  # larger T means more time exploring and less time descending
# If you make T much bigger, add more nines to c so you
# don't get a zero division error
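
# Assuming the annealer uses a geometric cooling schedule T_k = T * c**k
# (which the comments above suggest), c must stay close to 1 when T is large,
# or the temperature collapses toward zero too quickly:
T_demo, c_demo, k_demo = 50, 0.9995, 0
while T_demo * c_demo**k_demo > 1e-3:
    k_demo += 1
print(k_demo)  # about 21600 steps before the temperature drops below 0.001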
Example #7
        cols = train[0].container.data.shape[2]

        if dset == "T2D":

            net = Network([
                ConvPoolLayer(activation_fn=ReLU,
                              image_shape=(mini_batch_size, 1, rows, cols),
                              filter_shape=(64, 1, 1, 10),
                              poolsize=(1, 2)),
                ConvPoolLayer(activation_fn=ReLU,
                              image_shape=(mini_batch_size, 64, 1, 102),
                              filter_shape=(64, 64, 1, 10),
                              poolsize=(1, 2)),
                ConvPoolLayer(activation_fn=ReLU,
                              image_shape=(mini_batch_size, 64, 1, 46),
                              filter_shape=(64, 64, 1, 10),
                              poolsize=(1, 2)),
                FullyConnectedLayer(activation_fn=ReLU,
                                    n_in=64 * 1 * 18,
                                    n_out=dim1,
                                    p_dropout=dropout),
                FullyConnectedLayer(n_in=dim1,
                                    n_out=dim2,
                                    activation_fn=ReLU,
                                    p_dropout=dropout),
                SoftmaxLayer(n_in=dim2, n_out=2, p_dropout=dropout)
            ], mini_batch_size, c_prob)

        if dset == "Obesity":

            net = Network([

Example #8
from network import Network, predict_from_output
# create_train_set is assumed to live in helpers alongside the other loaders
from helpers import load_training_data, convert_to_xy, create_train_set
import numpy as np

img, lbl = load_training_data()
X, Y = convert_to_xy(img, lbl)
X_train, Y_train, X_test, Y_test = create_train_set(X, Y)

learning_rate = 0.003
layer_dims = [X.shape[0], 30, 20, Y.shape[0]]
net = Network(layer_dims, learning_rate)

costs = net.train(X, Y, 500)

print(costs)

Example #9

def test_create_router():
    """Test creation of a router."""
    network = Network()
    _router = Router(network, "router-name")
Example #10
def create_structure(tf, input_image, input_data, input_size, dropout, config):
    branches = []

    x = input_image
    # assert the input image is the segmentation output, which has size None*39*52*54
    x = depth2space(x, 3)
    # now it has shape [None, 39*3, 52*3, 9]
    # now we add its visualization
    xshape = x.get_shape()
    vis = tf.py_func(visualize_index, [x], tf.uint8)
    vis.set_shape([xshape[0], xshape[1], xshape[2], 3])
    tf.summary.image('SegVis', vis)

    network_manager = Network(config, dropout, tf.shape(x))
    '''dimension reduction'''
    # size 39*52*295
    # new shape 39*3 = 117, 52*3 = 156
    xc = network_manager.conv_block(x, 3, 2, 64, padding_in='VALID')
    # size 58*77
    print(xc)
    """conv1"""  # kernel sz, stride, num feature maps
    xc = network_manager.conv_block(xc, 3, 1, 64, padding_in='VALID')
    # size 56*75
    print(xc)
    """conv2"""
    xc = network_manager.conv_block(xc, 3, 2, 128, padding_in='VALID')
    # size 27*37
    print(xc)
    xc = network_manager.conv_block(xc, 3, 1, 128, padding_in='VALID')
    # size 25*35
    print(xc)
    """conv3"""
    xc = network_manager.conv_block(xc, 3, 2, 256, padding_in='VALID')
    # size 12*17
    print(xc)
    xc = network_manager.conv_block(xc, 3, 1, 256, padding_in='VALID')
    # size 10*15
    print(xc)
    """conv4"""
    xc = network_manager.conv_block(xc, 3, 2, 512, padding_in='VALID')
    # size 4*7
    print(xc)
    xc = tf.reduce_mean(xc, axis=[1, 2], keep_dims=True)
    print(xc)
    """mp3 (default values)"""
    """ reshape """
    x = tf.reshape(xc, [-1, int(np.prod(xc.get_shape()[1:]))], name='reshape')
    print(x)
    """ fc1 """
    x = network_manager.fc_block(x, 512)
    print(x)
    """ fc2 """
    x = network_manager.fc_block(x, 512)
    """Process Control"""
    # control = tf.reshape(control, [-1, int(np.prod(control.get_shape()[1:]))],name = 'reshape_control')
    # print control
    """ Speed (measurements)"""
    with tf.name_scope("Speed"):
        speed = input_data[config.inputs_names.index(
            "Speed")]  # get the speed from input data
        speed = network_manager.fc_block(speed, 128)
        speed = network_manager.fc_block(speed, 128)
    """ Joint sensory """
    j = tf.concat([x, speed], 1)
    j = network_manager.fc_block(j, 512)
    """Start BRANCHING"""
    for i in range(0, len(config.branch_config)):
        with tf.name_scope("Branch_" + str(i)):
            if config.branch_config[i][0] == "Speed":
                # we only use the image as input to speed prediction
                branch_output = network_manager.fc_block(x, 256)
                branch_output = network_manager.fc_block(branch_output, 256)
            else:
                branch_output = network_manager.fc_block(j, 256)
                branch_output = network_manager.fc_block(branch_output, 256)

            branches.append(
                network_manager.fc(branch_output,
                                   len(config.branch_config[i])))

        print(branch_output)

    weights = network_manager.get_weigths_dict()

    features = network_manager.get_feat_tensors_dict()

    vis_images = network_manager.get_vbp_images(xc)
    print(vis_images)

    print(vis_images.get_shape())

    # vis_images = tf.div(vis_images  -tf.reduce_min(vis_images),tf.reduce_max(vis_images) -tf.reduce_min(vis_images))

    # branches: each of them is a vector of the output(all vars you care) conditioned on that input control signal
    return branches, vis_images, features, weights
Example #11
"""
PyPepperChecker module
"""

from tcp_server import TcpServer
from network import Network

OUTPUT_SIZE = 3
INPUT_SIZE = 172 * 229 * 3

print("Building neural network...")
network = Network(INPUT_SIZE, OUTPUT_SIZE)

print "Processing images samples..."
network.process_images()

print "Training network..."
network.train()

print("Running TCP server...")
tcp_server = TcpServer(network)
tcp_server.run()
Example #12
from keras.models import load_model
from network import multimodal_cross_entropy
from manipulate_data import convert_rgb_to_lab, merge_channels
from softencoding import decode, softencoding
from network import Network
import numpy as np
import os
import cv2

## Change to the desired model name
model_name = 'new_model.h5'
net = Network(model_name)
net.load()

test_folder = "Dataset/Test/images"

folder = os.fsencode(test_folder)
for file in os.listdir(folder):
    filename = os.fsdecode(file)
    if filename.endswith(('.JPEG', '.png', '.jpg')):
        image = cv2.imread(os.path.join(test_folder, filename))
        image_small = cv2.resize(image, (16, 16))

        l, a, b = convert_rgb_to_lab(image)

        x_test = np.empty((1, 64, 64, 1), dtype=np.float32)
        x_test[0, :, :, 0] = l / 255.

        pred = net.predict(x_test)

        a, b = decode(pred.reshape((16, 16, 313)))
Example #13
def run_simulation(filename):
    network = Network()

    with open(filename) as f:
        net_data = json.load(f)

    for host in net_data["hosts"]:
        # Note: we will add the links to the host when we initialize links
        network.create_host(host["ip"], int(host["id"]))

    for router in net_data["routers"]:
        # Note: we will add the links to the router when we initialize links
        network.create_router(router["ip"], int(router["id"]))

    for flow in net_data["flows"]:
        src = network.hosts[int(flow["source"])]
        dest = network.hosts[int(flow["dest"])]
        new_flow = network.create_flow(
            convert_to_bits(float(flow["data_amount"]), MB), src, dest,
            float(flow["start_time"]), float(flow["window"]), flow["protocol"],
            int(flow["id"]))
        src.flows.append(new_flow)

    # Keep track of this so we can add links going the opposite direction
    highest_link_id = max([int(link["id"]) for link in net_data["links"]])
    for link in net_data["links"]:

        if link["source"][0] == "H":
            src = network.hosts[int(link["source"][1])]
            if net_data["routers"] != []:
                src.router = network.routers[int(link["sink"][1])]
        else:
            src = network.routers[int(link["source"][1])]

        if link["sink"][0] == "H":
            sink = network.hosts[int(link["sink"][1])]
            if net_data["routers"] != []:
                sink.router = network.routers[int(link["source"][1])]
        else:
            sink = network.routers[int(link["sink"][1])]

        new_link_1 = network.create_link(src, sink, \
            convert_to_bits(float(link["buff_size"]), KB), \
            convert_to_bits(float(link["link_rate"]), Mb), \
            convert_to_seconds(float(link["prop_delay"])), int(link["id"]))
        new_link_2 = network.create_link(sink, src, \
            convert_to_bits(float(link["buff_size"]), KB), \
            convert_to_bits(float(link["link_rate"]), Mb), \
            convert_to_seconds(float(link["prop_delay"])), highest_link_id + 1)
        network.correspond_links[new_link_1] = new_link_2
        network.correspond_links[new_link_2] = new_link_1
        highest_link_id += 1

        if isinstance(src, Host):
            src.outgoing_link = new_link_1
            src.incoming_link = new_link_2
        else:
            src.outgoing_links.append(new_link_1)
            src.incoming_links.append(new_link_2)
            src.neighbors.append(sink)
        if isinstance(sink, Host):
            sink.outgoing_link = new_link_2
            sink.incoming_link = new_link_1
        else:
            sink.outgoing_links.append(new_link_2)
            sink.incoming_links.append(new_link_1)
            sink.neighbors.append(src)

    # In Debug mode, we want to print out all the fields we set at initialization
    if DEBUG:
        print("\n")
        print("Printing state of network at initialization time.")
        links_list = list(network.links.items())
        if (len(links_list) > 0):
            print("\n")
            print("________Links________")
        for link_id, link in links_list:
            print("Printing out fields for Link " + str(link_id) + ".")
            print("    Capacity: " + str(link.capacity))
            print("    Source IP Address: " + str(link.connection1.ip))
            print("    Destination IP Address: " + str(link.connection2.ip))
            print("    Propogation Time: " + str(link.prop_time))
            print("    Queue Capacity: " + str(link.queue_capacity))

        hosts_list = list(network.hosts.items())
        if (len(hosts_list) > 0):
            print("\n")
            print("________Hosts________")
        for host in hosts_list:
            host_id = host[0]
            print("Printing out Fields for Host " + str(host_id) + ".")
            print("    IP Address: " + str(host[1].ip))
            print("    ID of Links to/from Host:")
            print("    " + str(host[1].incoming_link.id))
            print("    " + str(host[1].outgoing_link.id))
            print("    ID of Flows from Host:")
            for flow in host[1].flows:
                print("    " + str(flow.id))
            if host[1].router:
                print("    ID of connected router: " + str(host[1].router.ip))

        routers_list = list(network.routers.items())
        if (len(routers_list) > 0):
            print("\n")
            print("________Routers________")
        for router in routers_list:
            router_id = router[0]
            print("Printing out Fields for Router " + str(router_id) + ".")
            print("    IP Address: " + str(router[1].ip))
            print("    IDs of Outgoing Links:")
            for link in router[1].outgoing_links:
                print("    " + str(link.id))
            print("    IDs of Incoming Links:")
            for link in router[1].incoming_links:
                print("    " + str(link.id))
            print("    IP Addresses of Neighbors:")
            for neighbor in router[1].neighbors:
                print("    " + str(neighbor.ip))

        flows_list = list(network.flows.items())
        if (len(flows_list) > 0):
            print("\n")
            print("________Flows________")
        for flow in flows_list:
            flow_id = flow[0]
            print("Printing out Fields for Flow " + str(flow_id) + ".")
            print("    Number of Bits: " + str(flow[1].size))
            print("    Source IP Address: " + str(flow[1].source.ip))
            print("    Destination IP Address: " + str(flow[1].destination.ip))
            print("    Time Spawned " + str(flow[1].time_spawn))
            print("    Window Size: " + str(flow[1].window))
            print("    Protocol: " + str(flow[1].protocol))

    # This will find the minimum time step for each iteration
    lst_link_prop = []
    lst_link_rate = []
    links_list = list(network.links.items())
    for links in links_list:
        lst_link_rate.append(links[1].capacity)
        lst_link_prop.append(links[1].prop_time)
    timestep = find_time_step(lst_link_rate, lst_link_prop)
    network.timestep = timestep

    # Start the network!
    network.run_network()

    # Get the values for the calculations each link keeps track of
    # Convert to a list so that we can index each item
    links_list = list(network.links.items())
    size = len(links_list) // 2  # integer division: links come in forward/backward pairs
    packet_loss_dicts = []
    buffer_occ_dicts = []
    link_rate_dicts = []
    link_order = []

    start_index = 0 not in network.links
    for element in links_list:
        if element[0] < size + start_index:
            name = 'L' + str(element[0]) + '_right'
        else:
            name = 'L' + str(int(element[0] % size) + start_index) + '_left'
        link_order.append(name)
        packet_loss_dicts.append(element[1].packet_loss)
        buffer_occ_dicts.append(element[1].buffer_occupancy)
        link_rate_dicts.append(element[1].link_rates)

    # Get the values for the calculations each flow keeps track of
    flow_list = list(network.flows.items())
    wind_size_dicts = []
    flow_rate_dicts = []
    packet_delay_dicts = []
    flow_order = []
    for element in flow_list:
        flow_order.append("F" + str(element[0]))
        wind_size_dicts.append(element[1].window_sizes)
        flow_rate_dicts.append(element[1].flow_rates)
        packet_delay_dicts.append(element[1].packet_delays)

    # For tests 3 and 4, different flows have different protocols. In other cases,
    # we want to label the graphs with the protocols they follow
    protocol = ""
    if filename.split('.')[0] != 'test3' and filename.split('.')[0] != 'test4':
        protocol = flow_list[0][1].protocol

    # Send the plots to the graphing function
    graph.create_graphs(buffer_occ_dicts, packet_loss_dicts, network.curr_time, \
        link_rate_dicts, wind_size_dicts, flow_rate_dicts, packet_delay_dicts, \
        link_order, flow_order, filename.split('.')[0], protocol, network.curr_time + 1)
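

# For reference, a minimal topology file that run_simulation() can parse, with
# field names inferred from the json.load() accesses above (all values are
# illustrative only):
#
# {
#   "hosts":   [{"ip": "H0", "id": "0"}, {"ip": "H1", "id": "1"}],
#   "routers": [],
#   "flows":   [{"source": "0", "dest": "1", "data_amount": "20",
#                "start_time": "1", "window": "1", "protocol": "Reno",
#                "id": "0"}],
#   "links":   [{"source": "H0", "sink": "H1", "buff_size": "64",
#                "link_rate": "10", "prop_delay": "10", "id": "0"}]
# }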
Example #14
        wines: List = list(csv.reader(wine_file, quoting=csv.QUOTE_NONNUMERIC))
        shuffle(wines)
        for wine in wines:
            parameters: List[float] = [float(n) for n in wine[1:14]]
            wine_parameters.append(parameters)
            species: int = int(wine[0])
            if species == 1:
                wine_classifications.append([1.0, 0.0, 0.0])
            elif species == 2:
                wine_classifications.append([0.0, 1.0, 0.0])
            else:
                wine_classifications.append([0.0, 0.0, 1.0])
            wine_species.append(species)
    normalize_by_feature_scaling(wine_parameters)

    wine_network: Network = Network([13, 7, 3], 0.9)

    def wine_interpret_output(output: List[float]) -> int:
        if max(output) == output[0]:
            return 1
        elif max(output) == output[1]:
            return 2
        else:
            return 3

    wine_trainers: List[List[float]] = wine_parameters[0:150]
    wine_trainers_corrects: List[List[float]] = wine_classifications[0:150]
    for _ in range(10):
        wine_network.train(wine_trainers, wine_trainers_corrects)

    wine_testers: List[List[float]] = wine_parameters[150:178]
Example #15
def main():
    run = True
    clock = pygame.time.Clock()
    n = Network()
    player = int(n.getP())
    print("You are player", player)

    while run:
        clock.tick(60)
        try:
            game = n.send("get")
        except Exception:
            run = False
            print("Couldn't get game")
            break

        if game.bothWent():
            redrawWindow(win, game, player)
            pygame.time.delay(500)
            try:
                game = n.send("reset")
            except Exception:
                run = False
                print("Couldn't get game")
                break

            font = pygame.font.SysFont("arial", 90)
            move1 = game.get_player_move(0)
            move2 = game.get_player_move(1)
            # Every outcome branch below drew the same layout: player 1's move
            # on the left, player 2's move on the right. Factor out the
            # repeated image loading instead of branching per combination.
            def load_move_image(move):
                names = {"R": "rock", "P": "paper", "S": "scissors"}
                img = pygame.image.load("images/{}.png".format(names[move])).convert()
                return pygame.transform.scale(img, (200, 200))

            win.fill((0, 0, 0))
            win.blit(load_move_image(move1[0]), (200, 350))
            win.blit(load_move_image(move2[0]), (500, 350))

            if (game.winner() == 1 and player == 1) or (game.winner() == 0
                                                        and player == 0):
                text = font.render("You Won!", 1, (255, 0, 0))
            elif game.winner() == -1:
                text = font.render("Tie Game!", 1, (255, 0, 0))
            else:
                text = font.render("You Lost...", 1, (255, 0, 0))

            win.blit(text, (350, 200))
            pygame.display.update()
            pygame.time.delay(4000)

        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                run = False
                pygame.quit()

            if event.type == pygame.MOUSEBUTTONDOWN:
                pos = pygame.mouse.get_pos()
                for btn in btns:
                    if btn.click(pos) and game.connected():
                        if player == 0:
                            if not game.p1Went:
                                n.send(btn.text)
                        else:
                            if not game.p2Went:
                                n.send(btn.text)

        redrawWindow(win, game, player)

Example #16
    except FileNotFoundError:
        json_devices = {}
        print(
            '''No valid "data/device.json" found. Please create one with the following format:
{
    "00:00:00:00:00:00":
    {
      "type": "Device",
      "owner": "John Appleseed",
      "location": null,
      "allowed": true
    }
}
            ''')

    network = Network()

    try:
        devices = network.get_devices()
    except KeyboardInterrupt:
        print(
            'You stopped scanning. Scanning may take a while. If it takes too long, there may be a problem with the connection. Did you specify the correct network?'
        )
        sys.exit()

    for host, info in devices:
        info['mac'] = get_mac_address(ip=host)

    data = create_device_list(devices, json_devices)
    log_text = ''
Example #17
                                                outputs=cost,
                                                updates=updates,
                                                givens={x: shared_train_data})

    def train_epochs(self, epochs=1, verbose=False):
        errors = []
        for i in range(epochs):
            cost = self.train_one_epochs()
            if verbose: print "epochs:", i + 1, "error:", cost
            errors.append(cost)
        return errors


if __name__ == "__main__":
    xor_data_values = [[1.0, 1.0], [1.0, 0.0], [0.0, 1.0], [0.0, 0.0]]
    xor_target_values = [[0], [1], [1], [0]]
    ds = (xor_data_values, xor_target_values)

    x = T.matrix('x')

    nnet = Network(input=x, layers=(2, 2, 1))
    trainer = BackProp(nnet=nnet, dataset=ds, learning_rate=0.5)

    errors = trainer.train_epochs(600)

    predict = theano.function(inputs=[x], outputs=nnet.output)
    print(predict(numpy.array(xor_data_values)))

    plt.plot(errors)
    plt.show()
Example #18
# hyperparameters
ITERATIONS = 1 if args.evidence_file else int(args.iterations)
EVIDENCE_SIZE = int(args.evidence_size)
ACCEPT_ENTROPY = float(args.entropy_threshold)

distances = []
matches = []
merlin_times = []
approx_times = []
explained_variables = []
observed_entropies = []
for i in range(ITERATIONS):
	approx_time_sum = 0
	observed_entropies.append([])
	
	net = Network(UAI_FILE, args.evidence_file, args.files_folder, args.merlin_folder)
	
	net.read()  # Read the uai file to parse cardinalities of the variables
	if not args.evidence_file:
		# EXPLANATION_LENGTH = net.size - EVIDENCE_SIZE
		net.random_evidence(EVIDENCE_SIZE)  # Create a random evidence of size EVIDENCE_SIZE and write it to an .evid file
		net.write_evi_file()
		
	# assert EVIDENCE_SIZE + EXPLANATION_LENGTH < len(net.cardinalities), 'cannot explain more'
	
	import timeit
	import functools

	for k in range(net.size):
		approx_time_sum += timeit.timeit(
			functools.partial(net.compute_marginals, ACCEPT_ENTROPY), number=1)
		if not net.new_observed_variables:
			break
Example #19
from network import Network

net = Network()

net.generateImage()

print("END")
Example #20
from network import Network
from utils import LOG_INFO
from layers import Relu, Sigmoid, Linear
from loss import EuclideanLoss
from solve_net import train_net, test_net
from load_data import load_mnist_2d
import numpy as np

train_data, test_data, train_label, test_label = load_mnist_2d('data')

# Your model definition here
# You should explore different model architectures
model = Network()
model.add(Linear('fc1', 784, 300, 0.01))
model.add(Relu('Relu1'))
model.add(Linear('fc2', 300, 10, 0.01))
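
# One possible variation to explore (a hypothetical sketch; the layer classes
# follow the imports above -- a deeper net with a Sigmoid hidden layer):
# model = Network()
# model.add(Linear('fc1', 784, 400, 0.01))
# model.add(Sigmoid('Sigmoid1'))
# model.add(Linear('fc2', 400, 100, 0.01))
# model.add(Relu('Relu1'))
# model.add(Linear('fc3', 100, 10, 0.01))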

loss = EuclideanLoss(name='loss')

# Training configuration
# You should adjust these hyperparameters
# NOTE: one iteration means the model runs forward-backward on one batch of samples.
#       one epoch means the model has gone through all the training samples.
#       'disp_freq' denotes how many iterations pass between progress displays within an epoch.

config = {
    'learning_rate': 0.1,
    'weight_decay': 0.0001,
    'momentum': 0.7,
    'batch_size': 100,
    'max_epoch': 100,
Example #21
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@authors: Mihnea S. Teodorescu & Moe Assaf, University of Groningen
"""

#### Libraries
# Own libraries
from network import Network

#### Class declaration
net = Network([10000, 10, 10, 94])


class Recogniser():
    def __init__(self):
        pass

    def retrieve_char(self, img):
        # Compute the output of the neural network
        output = net.feed_forward(img)

        # Determine the most likely outcome and return its ASCII value
        best = 0  # renamed from `max` to avoid shadowing the builtin
        char = 0
        for i in range(94):
            if best < output[i]:
                best = output[i]
                char = i
        return chr(char + 33)
Example #22
keys = [
    "conv_outdim32_size8_stride4_act_lrelu",
    "conv_outdim64_size10_stride5_act_lrelu",
    "flatten",
    "fc_outdim500_act_lrelu",
    "fc_outdim768_act_lrelu",
    "reshape_6_8_16",
    "deconv_outdim64_size10_stride5_act_none",
    "deconv_outdim3_size8_stride4_act_none"
]

split_index = 4
encoder_keys, decoder_keys = keys[:split_index], keys[split_index:]
# `inp` is assumed to be the encoder input placeholder, e.g.
# inp = tf.placeholder(shape=(None, 120, 160, 3), dtype=tf.float32)
input_shape = inp.get_shape().as_list()

encoder = Network(encoder_keys, input_shape)
latent = encoder(inp)
input_shape = latent.get_shape().as_list()
decoder = Network(decoder_keys, input_shape)
out = decoder(latent)
loss = tf.reduce_mean((out - inp) ** 2)

latent_phd = tf.placeholder(shape=(None, 500), dtype=tf.float32)
inp_phd = tf.placeholder(shape=(None, 120, 160, 3), dtype=tf.float32)
out_decoder_only = decoder(latent_phd)
loss_decoder_only = tf.reduce_mean((out_decoder_only - inp_phd) ** 2)

optimizer = tf.train.AdamOptimizer(1e-3)
train_encoder = optimizer.minimize(loss, var_list=encoder.variables)
train_decoder = optimizer.minimize(loss_decoder_only, var_list=decoder.variables)
train_all = optimizer.minimize(loss)
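
# Minimal sketch of driving the graph above in a TF1 session (assumes
# `import numpy as np` and `import tensorflow as tf`, which this snippet
# does not show; the batch below is random data purely for illustration):
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch = np.random.rand(8, 120, 160, 3).astype(np.float32)
    _, rec_loss = sess.run([train_all, loss], feed_dict={inp: batch})
    print('reconstruction loss:', rec_loss)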
Example #23
    parser.add_argument('-lr', '--learning-rate', type=float, default=1e-5)
    parser.add_argument('-s', '--step', type=int, default=-1, \
            help='total training step, -1 means infinite')
    parser.add_argument('-b', '--batch-size', type=int, default=256, \
            help='batch size of each step')
    parser.add_argument('-c', '--checkpoint', type=str, default=None, \
            help='path to load pretrained model, default for no loading')
    parser.add_argument('--val-step', type=int, default=10, \
            help='how many training steps before each validation')
    parser.add_argument(
        '--save-step',
        type=int,
        default=1000,
        help='how many training steps before each model-saving')
    parser.add_argument('--model-path', type=str, \
            default='train_log/models/', help='path to save model')
    parser.add_argument('--logdir', type=str, \
            default='train_log/train.events', \
            help='path to save training logs')
    return parser.parse_args()


# }}}

if __name__ == '__main__':
    args = get_args()
    print(args)

    network = Network(args)
    network.train()
Example #24
    x = Tensor([[1, 2, 3], [1, 2, 3]])
    y = Tensor([7, 10])
    print(x.shape, y.shape)

    #linear_a = Linear(x.shape[1], 4, weight_init='ones')
    #linear_b = Linear(x.shape[0], y.shape[0], weight_init='ones')
    #relu = Relu()
    #net_2layer = Network([linear_a], 2)#, relu, linear_b])
    #print(x.view(-1, 2).shape)
    #print(net_2layer.forward(x.view(-1, 2)))

    linear1 = Linear(x.shape[0], x.shape[0], weight_init='ones')
    linear2 = Linear(x.shape[0], y.shape[0], weight_init='ones')

    net_2layer = Network([linear1, linear2], 1)

    mse = MSE()

    lr = 1e-3
    num_iter = 200

    timesteps = []
    loss_at_timesteps = []

    for it in range(num_iter):

        net_2layer.zero_grad()
        pred_2layer = net_2layer.forward(x)
        loss = mse.forward(pred_2layer, y)
        print("At iteration ", str(it), " the loss is ", loss)
Example #25
####### Declarations #######
from network import Network
#import matplotlib.pyplot as plt
import funcs
###########################

training, test = funcs.loadMnist()
max_epochs = 15
no = 2
tau = max_epochs / 2
tauN = max_epochs / 5
sigmaP = 100
#batch_size = 50
trainBool = True
layers = [784, 100]

NN = Network(layers)

trainBool = NN.train(training, max_epochs, no, tau, tauN, sigmaP,
                     trainBool)  #Comment out line to run on saved weights
NN.test(test, trainBool)
NN.saveMetrics(max_epochs, no, tau, tauN, sigmaP, layers[-1])
NN.saveWeights()

# Testing, kinda

weight = funcs.loadWeights()

funcs.plotMetrics(max_epochs)
funcs.graphHeatmap()
funcs.weightPlot(weight)
Example #26
def run_test():

    np.random.seed(config.RAND_SEED + 1932)

    net = Network()
    net.load_state_dict(torch.load('nn5.pt'))

    time = np.arange(0, config.TEST_SIZE, config.DELTA_T)
    ca1 = np.zeros(time.size)
    cb1 = np.zeros(time.size)
    ca2 = np.zeros(time.size)
    cb2 = np.zeros(time.size)
    control = np.zeros(time.size)
    targets = np.zeros(time.size)

    e_int = 0
    mse_error1 = 0.0
    mse_error2 = 0.0
    target = 5
    kinetic_constant = config.K

    control_prev = 0

    for i in range(1, time.size):
        if time[i] % config.STEP_T == 0:
            target = config.CB_MAX * np.random.random_sample()
        if time[i] % config.STEP_K == 0:
            kinetic_constant = config.K * (
                1 - config.K_VAR /
                2) + config.K * config.K_VAR * np.random.random_sample()
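
        # Plant model (an explicit-Euler step of a CSTR, as the update
        # equations below suggest):
        #   dCa/dt =  F/V * (u - Ca) - k * Ca
        #   dCb/dt = -F/V * Cb + (k / 2) * Ca
        # where u is the controlled inlet concentration and k is the
        # (randomly drifting) kinetic constant.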

        # PID

        ca1[i] = ca1[i - 1] + (config.F / config.V *
                               (control[i - 1] - ca1[i - 1]) -
                               kinetic_constant * ca1[i - 1]) * config.DELTA_T
        cb1[i] = cb1[i -
                     1] + (-config.F / config.V * cb1[i - 1] +
                           kinetic_constant / 2 * ca1[i - 1]) * config.DELTA_T
        e_int += config.DELTA_T * (target - cb1[i] + target - cb1[i - 1]) / 2
        control[i] = config.K1 * (target -
                                  cb1[i]) + config.K2 * e_int + config.K3 * (
                                      cb1[i - 1] - cb1[i]) / config.DELTA_T
        mse_error1 += (target - cb1[i])**2
        targets[i] = target

        # NN

        inputs = torch.tensor([cb2[i - 1], target, cb2[i - 1] - target],
                              dtype=torch.float32)
        action = net(inputs)
        control_prev += action.item() * config.CA_MAX
        if control_prev < 0:
            control_prev = 0
        if control_prev > config.CA_MAX:
            control_prev = config.CA_MAX
        ca2[i] = ca2[i - 1] + (config.F / config.V *
                               (control_prev - ca2[i - 1]) -
                               kinetic_constant * ca2[i - 1]) * config.DELTA_T
        cb2[i] = cb2[i -
                     1] + (-config.F / config.V * cb2[i - 1] +
                           kinetic_constant / 2 * ca2[i - 1]) * config.DELTA_T
        mse_error2 += (target - cb2[i])**2

    print(f'PID total error: {mse_error1}')
    print(f'NN total error: {mse_error2}')
    plt.figure(1)
    # a, = plt.plot(time, cb1)
    a, = plt.plot(time, cb2)
    b, = plt.plot(time, targets)
    plt.xlabel("time (s)")
    plt.ylabel("Concentration of B (mol/m^3)")
    plt.legend([a, b], ["Output", "Target"])
    plt.show()
Example #27
 def __init__(self, settings: Settings) -> None:
     self.settings = settings
     self.network = Network(settings)
     self.storage = Storage(settings)
Example #28
        idata[y, x][1] = pixel
        idata[y, x][2] = pixel
        idata[y, x][3] = ubyte(255)

        # branchless raster scan over a 28x28 image: advance to the next row
        # after the last column, and wrap x back to 0 at the end of each row
        y += 1 * (x == 27 and y != 27)
        x = (x + 1) * (x < 28 - 1)

    plt.imshow(idata)
    plt.show()


if __name__ == '__main__':
    with open("temp", "rb") as f:
        W, B = pickle.load(f)

    net = Network([784, 50, 10])
    net.W = W
    net.B = B

    tdata = load_data_wrapper()[2]

    while True:
        i = int(input("Training example: "))
        if i < 0:
            exit()

        tx, ty = tdata[i]

        x = net.feedforward(tx)[-1]

        a = x.argmax()
Example #29
 def __init__(self, network=None):
     if network is None:
         network = Network(TOPOLOGY)
     self.network = network
     self.fitness = 0
     self.is_alive = 1
Example #30
def main():
    # parse arguments
    train_date = '2021-04-09'
    args = parse_args(train_date)
    assert args.msnet, "Network_fl has not been fully revised yet"

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    # define logger
    logging.basicConfig(filename=args.log_dir+"/"+args.phase+'_log.txt', level=logging.DEBUG, format='%(asctime)s %(message)s')
    logging.getLogger().addHandler(logging.StreamHandler())

    # args.source_1_train_list = '/home/liuyuan/shu_codes/datasets/multi_site_prostate/cfgs/train8_test2/ISBI_3_train.txt'
    # args.source_1_val_list = '/home/liuyuan/shu_codes/datasets/multi_site_prostate/cfgs/train8_test2/ISBI_3_test.txt'
    # args.source_2_train_list = '/home/liuyuan/shu_codes/datasets/multi_site_prostate/cfgs/train8_test2/ISBI_1.5_train.txt'
    # args.source_2_val_list = '/home/liuyuan/shu_codes/datasets/multi_site_prostate/cfgs/train8_test2/ISBI_1.5_test.txt'
    # args.source_3_train_list = '/home/liuyuan/shu_codes/datasets/multi_site_prostate/cfgs/train8_test2/I2CVB_train.txt'
    # args.source_3_val_list = '/home/liuyuan/shu_codes/datasets/multi_site_prostate/cfgs/train8_test2/I2CVB_test.txt'
    #
    # args.test_1_list = '/home/liuyuan/shu_codes/datasets/multi_site_prostate/cfgs/train8_test2/ISBI_3_test.txt'
    # args.test_2_list = '/home/liuyuan/shu_codes/datasets/multi_site_prostate/cfgs/train8_test2/ISBI_1.5_test.txt'
    # args.test_3_list = '/home/liuyuan/shu_codes/datasets/multi_site_prostate/cfgs/train8_test2/I2CVB_test.txt'
    # Paths on the GPU 12000 machine
    # args.source1_dir = '/home/liuyuan/shu_codes/datasets/brats/splited_by_ManufacturerModelName_preprocessed/train/signa_excite_1_5'
    # args.source2_dir = '/home/liuyuan/shu_codes/datasets/brats/splited_by_ManufacturerModelName_preprocessed/train/signa_excite_3'
    # args.source3_dir = '/home/liuyuan/shu_codes/datasets/brats/splited_by_ManufacturerModelName_preprocessed/train/GENESIS_SIGNA_drop_3'

    # Settings on the GPU 1000 machine
    args.source1_dir = '/home/wangshu/datasets/brats/splited_by_ManufacturerModelName_preprocessed/train/signa_excite_1_5'
    args.source2_dir = '/home/wangshu/datasets/brats/splited_by_ManufacturerModelName_preprocessed/train/signa_excite_3'
    args.source3_dir = '/home/wangshu/datasets/brats/splited_by_ManufacturerModelName_preprocessed/train/GENESIS_SIGNA_drop_3'

    args.cost_kwargs = {
        "seg_dice": 1,
        "seg_ce": 0.1,
        "miu_seg_L2_norm": 1e-4,
        "student_hard_dice": 0.5,
        "student_soft_dice": 0.5,
        "student_inter_align": 0.001,
        "student_1": 1.0,
        "student_2": 1.0,
        "student_3": 1.0,
    }

    args.opt_kwargs = {
        "update_source_segmenter": False,
        "source_segmenter_fine_tune": False,
    }

    # print all parameters
    logging.info("Usage:")
    logging.info("    {0}".format(" ".join([x for x in sys.argv]))) 
    logging.debug("All settings used:")

    for k, v in vars(args).items():
        logging.debug("    {0}: {1}".format(k, v))

    gpu_options = tf.GPUOptions(allow_growth=False)
    config_proto = tf.ConfigProto(gpu_options=gpu_options)
    off = rewriter_config_pb2.RewriterConfig.OFF

    config_proto.graph_options.rewrite_options.arithmetic_optimization = off

    # open session
    
    with tf.Session(config=config_proto) as sess:
        if args.phase == 'test':
            args.batch_size = 1

        logging.info("Network built")
        logging.info(f"Whether use MS-Net: {args.msnet}")
        if args.msnet:
            from network import Network
            network = Network(args)
        else:
            from network_fl import NetworkFL
            network = NetworkFL(args)

        # show network architecture
        show_all_variables()

        if args.phase == 'train':
            trainer = Trainer(args, sess, network=network)
            trainer.train()

        if args.phase == 'test':
            # tester = Tester(args, sess, network = network)
            # seg_dice = tester.test()
            pass