Example #1
def test():
    name = "test"
    network.clear(name)

    params = get_params(name=name,
                        FX_sel='basic',
                        location='hips',
                        dset_L='train',
                        dset_U='validation',
                        dset_V='validation',
                        ratio_L=1,
                        ratio_U=1,
                        ratio_V=1,
                        prediction=True,
                        pretrain='final',
                        runs=1,
                        epochs=10,
                        save_step=2,
                        oversampling=True,
                        G_label_sample=True,
                        G_label_factor=1,
                        C_basic_train=True,
                        R_active=True,
                        G_no=1,
                        D_no=1,
                        C_no=1,
                        log_name='log')

    train(params=params)
    GAN.get_prediction_accuracy(params)
Example #2
def run(protocol, csvfile):
	for path in sorted(glob.glob(f'../../data/grid4/*.json')):
		state = tools.load_json(path)
		(node_count, link_count) = tools.json_count(state)

		print(f'run {protocol} on {path}')

		network.apply(state=state, link_command=get_tc_command, remotes=remotes)
		tools.sleep(10)

		software_start_ms = tools.millis()
		software.start(protocol, remotes)
		software_startup_ms = tools.millis() - software_start_ms

		tools.sleep(30)

		paths = tools.get_random_paths(state, 2 * link_count)
		paths = tools.filter_paths(state, paths, min_hops=2, path_count=link_count)
		ping_result = tools.ping_paths(remotes=remotes, paths=paths, duration_ms=30000, verbosity='verbose')

		sysload_result = tools.sysload(remotes)

		software.clear(remotes)

		# add data to csv file
		extra = (['node_count', 'software_startup_ms'], [node_count, software_startup_ms])
		tools.csv_update(csvfile, '\t', extra, ping_result.getData(), sysload_result)

		network.clear(remotes)

		# abort the benchmark if less than 40% of the pings arrive
		if (ping_result.received / ping_result.transmitted) < 0.4:
			break
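
The run() function above expects a protocol name and an open CSV file. Examples 9 and 10 below show the module-level setup these scripts share (clearing leftover state, defining get_tc_command, reading a PREFIX environment variable). A minimal, hypothetical driver along those lines is sketched here; the protocol names and the output file name are assumptions, and it relies on a prefix variable like the one defined in Examples 9 and 10. Only the run(protocol, csvfile) signature comes from this example.

if __name__ == '__main__':
    # Hypothetical driver; protocol names and output file name are assumptions.
    for protocol in ['babel', 'batman-adv', 'olsr2']:
        with open(f'{prefix}benchmark-{protocol}.csv', 'w+') as csvfile:
            run(protocol, csvfile)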
Example #3
def run(protocol, csvfile, step_duration, step_distance):
    shared.seed_random(42)

    node_count = 50
    state = topology.create_nodes(node_count)
    mobility.randomize_positions(state, xy_range=1000)
    mobility.connect_range(state, max_links=150)

    # create network and start routing software
    network.apply(state, link_command=get_tc_command, remotes=remotes)
    software.start(protocol)

    test_beg_ms = shared.millis()
    for n in range(0, 30):
        print(f'{protocol}: iteration {n}')

        #with open(f'graph-{step_duration}-{step_distance}-{n:03d}.json', 'w+') as file:
        #	json.dump(state, file, indent='  ')

        # connect nodes range
        wait_beg_ms = shared.millis()

        # update network representation
        mobility.move_random(state, distance=step_distance)
        mobility.connect_range(state, max_links=150)

        # update network
        tmp_ms = shared.millis()
        network.apply(state=state,
                      link_command=get_tc_command,
                      remotes=remotes)
        #software.apply(protocol=protocol, state=state) # we do not change the node count
        network_ms = shared.millis() - tmp_ms

        # Wait until step_duration seconds have elapsed since wait_beg_ms; errors if already exceeded
        shared.wait(wait_beg_ms, step_duration)

        paths = ping.get_random_paths(state, 2 * 400)
        paths = ping.filter_paths(state, paths, min_hops=2, path_count=200)
        ping_result = ping.ping(paths=paths,
                                duration_ms=2000,
                                verbosity='verbose',
                                remotes=remotes)

        # add data to csv file
        extra = (['node_count',
                  'time_ms'], [node_count,
                               shared.millis() - test_beg_ms])
        shared.csv_update(csvfile, '\t', extra, ping_result.getData())

    software.clear(remotes)
    network.clear(remotes)
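
shared.wait(wait_beg_ms, step_duration) paces each iteration: it sleeps for whatever remains of the step and, per the comment above, errors out if the step has already overrun. Its real implementation is not part of this listing; the following is only a minimal sketch of that assumed behaviour.

import time

def millis():
    # milliseconds since the epoch, matching how shared.millis() is used above
    return int(time.time() * 1000)

def wait(beg_ms, duration_s):
    # sleep for the remainder of the step; fail if the step already overran
    elapsed_s = (millis() - beg_ms) / 1000.0
    if elapsed_s > duration_s:
        raise Exception(f'step took {elapsed_s:.1f}s, longer than the allowed {duration_s}s')
    time.sleep(duration_s - elapsed_s)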
Example #4
def run(protocol, csvfile):
	tools.seed_random(23)

	node_count = 50
	state = topology.create_nodes(node_count)
	mobility.randomize_positions(state, xy_range=1000)
	mobility.connect_range(state, max_links=150)

	# create network and start routing software
	network.apply(state=state, link_command=get_tc_command)
	software.start(protocol)
	tools.sleep(30)

	for step_distance in [50, 100, 150, 200, 250, 300, 350, 400]:
		print(f'{protocol}: step_distance {step_distance}')

		traffic_beg = tools.traffic()
		for n in range(0, 6):
			#with open(f'graph-{step_distance}-{n}.json', 'w+') as file:
			#	json.dump(state, file, indent='  ')

			# connect nodes range
			wait_beg_ms = tools.millis()

			# update network representation
			mobility.move_random(state, distance=step_distance)
			mobility.connect_range(state, max_links=150)

			# update network
			network.apply(state=state, link_command=get_tc_command)

			# Wait until 10 seconds have elapsed since wait_beg_ms; errors if already exceeded
			tools.wait(wait_beg_ms, 10)

			paths = tools.get_random_paths(state, 2 * 200)
			paths = tools.filter_paths(state, paths, min_hops=2, path_count=200)
			ping_result = tools.ping_paths(paths=paths, duration_ms=2000, verbosity='verbose')

			packets_arrived_pc = 100 * (ping_result.received / ping_result.send)
			traffic_end = tools.traffic()

			# add data to csv file
			extra = (['node_count', 'time_ms', 'step_distance_m', 'n', 'packets_arrived_pc'], [node_count, tools.millis() - wait_beg_ms, step_distance, n, packets_arrived_pc])
			tools.csv_update(csvfile, '\t', extra, (traffic_end - traffic_beg).getData(), ping_result.getData())

			traffic_beg = traffic_end

	software.clear()
	network.clear()
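
The loop above subtracts two traffic snapshots and passes (traffic_end - traffic_beg).getData() to tools.csv_update(), alongside an extra tuple of (column names, values). The snapshot type returned by tools.traffic() is not shown in these examples; the sketch below is only an assumption about the interface implied by that usage.

class TrafficSnapshot:
    # Hypothetical snapshot of traffic counters; only the interface
    # (subtraction and getData()) is implied by the examples above.
    def __init__(self, rx_bytes=0, tx_bytes=0):
        self.rx_bytes = rx_bytes
        self.tx_bytes = tx_bytes

    def __sub__(self, other):
        return TrafficSnapshot(self.rx_bytes - other.rx_bytes,
                               self.tx_bytes - other.tx_bytes)

    def getData(self):
        # (column names, values), the same shape as the 'extra' tuples above
        return (['rx_bytes', 'tx_bytes'], [self.rx_bytes, self.tx_bytes])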
Example #5
def run(protocol, files, csvfile):
    tools.seed_random(1234)

    for path in sorted(glob.glob(files)):
        state = tools.load_json(path)
        (node_count, link_count) = tools.json_count(state)

        print(f'run {protocol} on {path}')

        network.apply(state=state, link_command=get_tc_command)

        tools.sleep(10)

        for offset in range(0, 60, 2):
            tmp_ms = tools.millis()
            traffic_beg = tools.traffic()
            traffic_ms = tools.millis() - tmp_ms

            tmp_ms = tools.millis()
            software.start(protocol)
            software_ms = tools.millis() - tmp_ms

            # Give the routing software 'offset' seconds to converge before measuring
            tools.sleep(offset)

            paths = tools.get_random_paths(state, 2 * 200)
            paths = tools.filter_paths(state,
                                       paths,
                                       min_hops=2,
                                       path_count=200)
            ping_result = tools.ping_paths(paths=paths,
                                           duration_ms=2000,
                                           verbosity='verbose')

            traffic_end = tools.traffic()

            sysload_result = tools.sysload()

            software.clear()

            # add data to csv file
            extra = (['node_count', 'traffic_ms', 'software_ms', 'offset_ms'],
                     [node_count, traffic_ms, software_ms, offset * 1000])
            tools.csv_update(csvfile, '\t', extra,
                             (traffic_end - traffic_beg).getData(),
                             ping_result.getData(), sysload_result)

        network.clear()
Example #6
def run(protocol, files, csvfile):
    for path in sorted(glob.glob(files)):
        state = shared.load_json(path)
        (node_count, link_count) = shared.json_count(state)

        # Limit node count to 300
        if node_count > 300:
            continue

        print(f'run {protocol} on {path}')

        network.apply(state=state,
                      link_command=get_tc_command,
                      remotes=remotes)

        shared.sleep(10)

        software_start_ms = shared.millis()
        software.start(protocol, remotes)
        software_startup_ms = shared.millis() - software_start_ms

        shared.sleep(300)

        start_ms = shared.millis()
        traffic_beg = traffic.traffic(remotes)

        paths = ping.get_random_paths(state, 2 * 200)
        paths = ping.filter_paths(state, paths, min_hops=2, path_count=200)
        ping_result = ping.ping(remotes=remotes,
                                paths=paths,
                                duration_ms=300000,
                                verbosity='verbose')

        traffic_ms = shared.millis() - start_ms
        traffic_end = traffic.traffic(remotes)

        sysload_result = shared.sysload(remotes)

        software.clear(remotes)
        network.clear(remotes)

        # add data to csv file
        extra = (['node_count', 'traffic_ms', 'software_startup_ms'],
                 [node_count, traffic_ms, software_startup_ms])
        shared.csv_update(csvfile, '\t', extra,
                          (traffic_end - traffic_beg).getData(),
                          ping_result.getData(), sysload_result)
Example #7
def run(protocol, csvfile):
	shared.seed_random(1377)

	for path in sorted(glob.glob(f'../../data/freifunk/*.json')):
		state = shared.load_json(path)

		(node_count, link_count) = shared.json_count(state)
		# strip the first 9 characters (presumably a 'freifunk-' prefix) and the '.json' suffix, then append the node count
		dataset_name = '{}-{:04d}'.format(os.path.basename(path)[9:-5], node_count)

		# limit to what the host can handle
		if node_count > 310:
			continue

		print(f'run {protocol} on {path}')

		state = network.apply(state=state, link_command=get_tc_command, remotes=remotes)
		shared.sleep(10)

		software.start(protocol, remotes)

		shared.sleep(300)

		start_ms = shared.millis()
		traffic_beg = traffic.traffic(remotes)

		paths = ping.get_random_paths(state, 2 * node_count)
		paths = shared.filter_paths(state, paths, min_hops=2, path_count=node_count)
		ping_result = shared.ping(remotes=remotes, paths=paths, duration_ms=300000, verbosity='verbose')

		sysload_result = shared.sysload(remotes)

		traffic_ms = shared.millis() - start_ms
		traffic_end = traffic.traffic(remotes)
		software.clear(remotes)

		# add data to csv file
		extra = (['dataset_name', 'node_count', 'traffic_ms'], [dataset_name, node_count, traffic_ms])
		shared.csv_update(csvfile, '\t', extra, (traffic_end - traffic_beg).getData(), ping_result.getData(), sysload_result)

		network.clear(remotes)
Example #8
def run(protocol, tasks, csvfile):
	for path, gateways in tasks:
		state = shared.load_json(path)
		(node_count, link_count) = shared.json_count(state)

		# Limit node count to 300
		if node_count > 300:
			continue

		print(f'run {protocol} on {path}')

		network.apply(state=state, remotes=remotes)

		shared.sleep(10)

		software_start_ms = shared.millis()
		software.start(protocol, remotes)
		software_startup_ms = shared.millis() - software_start_ms

		shared.sleep(30)

		start_ms = shared.millis()
		traffic_beg = traffic.traffic(remotes)

		paths = ping.get_paths_to_gateways(state, gateways)
		ping_result = ping.ping(remotes=remotes, paths=paths, duration_ms=300000, verbosity='verbose')

		traffic_ms = shared.millis() - start_ms
		traffic_end = traffic.traffic(remotes)

		sysload_result = shared.sysload(remotes)

		software.clear(remotes)
		network.clear(remotes)

		# add data to csv file
		extra = (['node_count', 'traffic_ms', 'software_startup_ms'], [node_count, traffic_ms, software_startup_ms])
		shared.csv_update(csvfile, '\t', extra, (traffic_end - traffic_beg).getData(), ping_result.getData(), sysload_result)
Example #9
#!/usr/bin/env python3

import os
import sys
import glob

sys.path.append('../../')
import software
import network
import tools

software.clear()
network.clear()

prefix = os.environ.get('PREFIX', '')


# 100MBit LAN cable
def get_tc_command(link, ifname):
    return f'tc qdisc replace dev "{ifname}" root tbf rate 100mbit burst 8192 latency 1ms'


def run(protocol, files, csvfile):
    tools.seed_random(1234)

    for path in sorted(glob.glob(files)):
        state = tools.load_json(path)
        (node_count, link_count) = tools.json_count(state)

        print(f'run {protocol} on {path}')
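
get_tc_command() above shapes every emulated link to a 100 MBit/s rate with tbf. A variant that also emulates propagation delay and packet loss could use netem instead; the sketch below is illustrative only, and its delay/loss values are assumptions rather than settings used by any of the examples.

# Hypothetical alternative to get_tc_command(); rate/delay/loss values are assumptions.
def get_tc_command_netem(link, ifname):
    return f'tc qdisc replace dev "{ifname}" root netem rate 10mbit delay 20ms loss 0.5%'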
Example #10
import os
import sys
import glob

sys.path.append('../../')
from shared import Remote
import shared
import ping
import software
import network

remotes = [Remote()]  #[Remote('192.168.44.133'), Remote('192.168.44.137')]

shared.check_access(remotes)
software.clear(remotes)
network.clear(remotes)

prefix = os.environ.get('PREFIX', '')


# 100MBit LAN cable
def get_tc_command(link, ifname):
    return f'tc qdisc replace dev "{ifname}" root tbf rate 100mbit burst 8192 latency 1ms'


def run(protocol, csvfile):
    for path in sorted(glob.glob(f'../../data/grid4/*.json')):
        state = shared.load_json(path)
        (node_count, link_count) = shared.json_count(state)

        print(f'run {protocol} on {path}')
Example #11
def train_GAN(params):

    # -------------------
    #  Parameters
    # -------------------

    log(str(params), name=params['log_name'])

    # Clear remaining model
    if params['ratio_L'] < 1.0 or params['ratio_U'] < 1.0:
        network.clear(params['name'] + '_R' + str(params['start_run']))
    plt.close('all')

    # -------------------
    #  CUDA
    # -------------------

    cuda = torch.cuda.is_available()
    G_Loss = torch.nn.BCELoss()
    D_Loss = torch.nn.BCELoss()
    C_Loss = torch.nn.BCELoss()

    if cuda:
        G_Loss.cuda()
        D_Loss.cuda()
        C_Loss.cuda()
        floatTensor = torch.cuda.FloatTensor
        log("CUDA Training.", name=params['log_name'])
        network.clear_cache()
    else:
        floatTensor = torch.FloatTensor
        log("CPU Training.", name=params['log_name'])

    # -------------------
    #  Data scaling
    # -------------------
    '''
    XTL ... Original labelled data
    XTU ... Original unlabelled data
    XTV ... Original validation data
    
    XL  ... Labelled data
    XU  ... Unlabelled data
    XV  ... Validation data
    '''

    dset_L = params['dset_L']
    dset_U = params['dset_U']
    dset_V = params['dset_V']

    if dset_L == dset_U:
        X, Y = pp.get_data(params, dset_L)
        XTL, XTU, YTL, YTU = pp.split_data(X, Y)
    else:
        XTL, YTL = pp.get_data(params, dset_L)
        XTU, YTU = pp.get_data(params, dset_U)

    if dset_V is None:
        XTV, YTV = XTU, YTU
    else:
        XTV, YTV = pp.get_data(params, dset_V)

    XTL = pp.scale_minmax(XTL)
    XTU = pp.scale_minmax(XTU)

    XTV = pp.scale_minmax(XTV)
    if params['ratio_V'] < 1.0:
        XTV, YTV = pp.select_random(XTV, YTV, params['ratio_V'])
        log("Selected %s of validation samples." %
            (format(params['ratio_V'], '0.2f')),
            name=params['log_name'])

    DL_V = pp.get_dataloader(params, XTV, YTV, batch_size=1024)

    # -------------------
    #  Load accuracy
    # -------------------

    mat_accuracy_G, mat_accuracy_D, mat_accuracy_C = network.load_Acc(params)

    if (params['R_active']):
        mat_accuracy_R = network.load_R_Acc(params)

    # -------------------
    #  Final prediction
    # -------------------

    if (params['prediction']):
        Y_pred = torch.zeros(XTU.shape[0], 8)

    # -------------------
    #  Start Training
    # -------------------

    YF = None
    PF = None
    RF = None

    for run in range(params['runs']):

        # -------------------
        #  Labelled Data
        # -------------------

        XL, YL = XTL, YTL

        if params['ratio_L'] < 1.0:
            XL, YL = pp.select_random(XL, YL, params['ratio_L'])
            log("Selected %s of labelled samples." %
                (format(params['ratio_L'], '0.2f')),
                name=params['log_name'])

        count_L = YL.shape[0]
        log("Number of labelled samples = %d." % (count_L),
            name=params['log_name'])

        DL_L = pp.get_dataloader(params, XL, YL)

        # -------------------
        #  Unlabelled Data
        # -------------------

        XU, YU = XTU, YTU

        if params['ratio_U'] < 1.0:
            XU, YU = pp.select_random(XU, YU, params['ratio_U'])
            log("Selected %s of unlabelled samples." %
                (format(params['ratio_U'], '0.2f')),
                name=params['log_name'])

        log("Number of unlabelled samples = %d." % (XU.shape[0]),
            name=params['log_name'])

        DL_U_iter = pp.get_perm_dataloader(params, XU, YU)

        # -------------------
        #  Networks
        # -------------------

        G, D, C = network.load_GAN(run, params)

        if (params['R_active']):
            R = network.load_Ref(run, params)

        # -------------------
        #  Optimizers
        # -------------------

        optimizer_G = torch.optim.Adam(G.parameters(),
                                       lr=params['GLR'],
                                       betas=(params['GB1'], params['GB2']))
        optimizer_D = torch.optim.Adam(D.parameters(),
                                       lr=params['DLR'],
                                       betas=(params['DB1'], params['DB2']))
        optimizer_C = torch.optim.Adam(C.parameters(),
                                       lr=params['CLR'],
                                       betas=(params['CB1'], params['CB2']))

        if (params['R_active']):
            optimizer_R = torch.optim.Adam(R.parameters(),
                                           lr=params['CLR'],
                                           betas=(params['CB1'],
                                                  params['CB2']))

        # -------------------
        #  Training
        # -------------------

        if run >= params['start_run']:

            if params['oversampling']:
                XL, YL = pp.over_sampling(params, XL, YL)
                log("Oversampling: created %d new labelled samples." %
                    (XL.shape[0] - count_L),
                    name=params['log_name'])

            for epoch in range(params['epochs']):

                # Jump to start epoch
                if run == params['start_run']:
                    if epoch < params['start_epoch']:
                        continue

                running_loss_G = 0.0
                running_loss_D = 0.0
                running_loss_C = 0.0
                """
                      X1, P1      - Labelled Data,      predicted Labels (C)                             | Regular training of classifier
                W1 = (X1, Y1), A1 - Labelled Data,      actual Labels,        predicted Authenticity (D) | Real samples
                W2 = (X2, Y2), A2 - Unlabelled Data,    predicted Labels (C), predicted Authenticity (D) | Real data with fake labels
                W3 = (X3, Y3), A3 - Synthetic Data (G), actual Labels,        predicted Authenticity (D) | Fake data with real labels
                W4 = (X4, Y4), A4 - Unlabelled Data,    predicted Labels (C), predicted Authenticity (D) | Fake positive to prevent overfitting
                      XV, YV,  PV - Validation Data,    actual Labels,        predicted Labels (C)       | Validation samples
                  R1, F2, F3,  R4 - Real/Fake Labels
                """
                for i, data in enumerate(DL_L, 1):

                    loss_G = []
                    loss_D = []
                    loss_C = []

                    # -------------------
                    #  Train the classifier on real samples
                    # -------------------
                    X1, Y1 = data
                    W1 = torch.cat((X1, Y1), dim=1)
                    R1 = floatTensor(W1.shape[0], 1).fill_(1.0)

                    if params['C_basic_train']:
                        optimizer_C.zero_grad()
                        P1 = C(X1)
                        loss = C_Loss(P1, Y1)
                        loss_C.append(loss)
                        loss.backward()
                        optimizer_C.step()

                    if params['R_active']:
                        optimizer_R.zero_grad()
                        PR = R(X1)
                        loss = C_Loss(PR, Y1)
                        loss.backward()
                        optimizer_R.step()

                    # -------------------
                    #  Train the discriminator to label real samples
                    # -------------------
                    optimizer_D.zero_grad()
                    A1 = D(W1)
                    loss = D_Loss(A1, R1)
                    loss_D.append(loss)
                    loss.backward()
                    optimizer_D.step()

                    # -------------------
                    #  Classify unlabelled data
                    # -------------------
                    optimizer_C.zero_grad()
                    X2 = DL_U_iter.get_next()[0]
                    Y2 = C(X2)
                    W2 = torch.cat((X2, Y2), dim=1)

                    # -------------------
                    #  Train the classifier to label unlabelled samples
                    # -------------------
                    A2 = D(W2)
                    R2 = floatTensor(W2.shape[0], 1).fill_(1.0)
                    loss = C_Loss(A2, R2)
                    loss_C.append(loss)
                    loss.backward()
                    optimizer_C.step()

                    # -------------------
                    #  Train the discriminator to label predicted samples
                    # -------------------
                    optimizer_D.zero_grad()
                    A2 = D(W2.detach())
                    F2 = floatTensor(W2.shape[0], 1).fill_(0.0)
                    loss = D_Loss(A2, F2)
                    loss_D.append(loss)
                    loss.backward()
                    optimizer_D.step()

                    # -------------------
                    #  Train the discriminator to label fake positive samples
                    # -------------------
                    X4 = DL_U_iter.get_next()[0]
                    Y4 = C(X4)
                    W4 = torch.cat((X4, Y4), dim=1)

                    optimizer_D.zero_grad()
                    A4 = D(W4)
                    R4 = floatTensor(W4.shape[0], 1).fill_(1.0)
                    loss = D_Loss(A4, R4)
                    loss_D.append(loss)
                    loss.backward()
                    optimizer_D.step()

                    # -------------------
                    #  Create Synthetic Data
                    # -------------------
                    optimizer_G.zero_grad()
                    if params['G_label_sample']:
                        # Selected Labels from a uniform distribution of available labels
                        Y3 = floatTensor(
                            pp.get_one_hot_labels(params=params,
                                                  num=Y1.shape[0] *
                                                  params['G_label_factor']))
                    else:
                        # Select labels from current training batch
                        Y3 = torch.cat(
                            ([Y1 for _ in range(params['G_label_factor'])]),
                            dim=0)

                    Z = floatTensor(
                        np.random.normal(0, 1,
                                         (Y3.shape[0], params['noise_shape'])))
                    I3 = torch.cat((Z, Y3), dim=1)
                    X3 = G(I3)
                    W3 = torch.cat((X3, Y3), dim=1)

                    # -------------------
                    #  Train the generator to fool the discriminator
                    # -------------------
                    A3 = D(W3)
                    R3 = floatTensor(W3.shape[0], 1).fill_(1.0)
                    loss = G_Loss(A3, R3)
                    loss_G.append(loss)
                    loss.backward()
                    optimizer_G.step()

                    # -------------------
                    #  Train the discriminator to label synthetic samples
                    # -------------------
                    optimizer_D.zero_grad()
                    A3 = D(W3.detach())
                    F3 = floatTensor(W3.shape[0], 1).fill_(0.0)
                    loss = D_Loss(A3, F3)
                    loss_D.append(loss)
                    loss.backward()
                    optimizer_D.step()

                    # -------------------
                    #  Calculate overall loss
                    # -------------------
                    running_loss_G += np.mean([loss.item() for loss in loss_G])
                    running_loss_D += np.mean([loss.item() for loss in loss_D])
                    running_loss_C += np.mean([loss.item() for loss in loss_C])

                # -------------------
                #  Post Epoch
                # -------------------

                logString = "[Run %d/%d] [Epoch %d/%d] [G loss: %f] [D loss: %f] [C loss: %f]" % (
                    run + 1, params['runs'], epoch + 1, params['epochs'],
                    running_loss_G / (i), running_loss_D /
                    (i), running_loss_C / (i))
                log(logString, save=False, name=params['log_name'])

                if (epoch + 1) % params['save_step'] == 0:
                    idx = run, int(epoch / params['save_step']) + 1

                    acc_D_real = []
                    acc_D_vs_C = []
                    acc_D_vs_G = []
                    acc_C_real = []

                    for data in DL_V:

                        XV, YV = data

                        # Predict labels
                        PV = C(XV)

                        if params['R_active']:
                            PR = R(XV)
                            mat_accuracy_R[idx] = get_accuracy(PR, YV)
                            network.save_Ref(params['name'], run, R)
                            network.save_R_Acc(params, mat_accuracy_R)

                        # Generate Synthetic Data
                        Z = floatTensor(
                            np.random.normal(
                                0, 1, (YV.shape[0], params['noise_shape'])))
                        IV = torch.cat((Z, YV), dim=1)
                        XG = G(IV)

                        # Estimate Discriminator Accuracy
                        WV1 = torch.cat((XV, YV), dim=1)
                        WV2 = torch.cat((XV, PV), dim=1)
                        WV3 = torch.cat((XG, YV), dim=1)
                        RV1 = floatTensor(WV1.shape[0], 1).fill_(1.0)
                        FV2 = floatTensor(WV2.shape[0], 1).fill_(0.0)
                        FV3 = floatTensor(WV3.shape[0], 1).fill_(0.0)

                        AV1 = D(WV1)
                        AV2 = D(WV2)
                        AV3 = D(WV3)

                        acc_D_real.append(get_accuracy_binary(AV1, RV1))
                        acc_D_vs_C.append(get_accuracy_binary(AV2, FV2))
                        acc_D_vs_G.append(get_accuracy_binary(AV3, FV3))

                        acc_C_real.append(get_accuracy(PV, YV))

                    acc_D_real = np.mean(acc_D_real)
                    acc_D_vs_C = np.mean(acc_D_vs_C)
                    acc_D_vs_G = np.mean(acc_D_vs_G)
                    acc_D = .5 * acc_D_real + .25 * acc_D_vs_G + .25 * acc_D_vs_C
                    mat_accuracy_D[idx] = acc_D

                    acc_C_real = np.mean(acc_C_real)
                    acc_C_vs_D = 1.0 - acc_D_vs_C
                    acc_C = .5 * acc_C_real + .5 * acc_C_vs_D
                    mat_accuracy_C[idx] = acc_C_real

                    acc_G = 1.0 - acc_D_vs_G
                    mat_accuracy_G[idx] = acc_G

                    logString = "[Run %d/%d] [Epoch %d/%d] [G acc: %f] [D acc: %f | vs Real: %f | vs G: %f | vs C: %f] [C acc: %f | vs Real: %f | vs D: %f]" % (
                        run + 1, params['runs'], epoch + 1, params['epochs'],
                        acc_G, acc_D, acc_D_real, acc_D_vs_G, acc_D_vs_C,
                        acc_C, acc_C_real, acc_C_vs_D)
                    log(logString, save=True, name=params['log_name'])

                    network.save_GAN(params['name'], run, G, D, C)
                    params['start_epoch'] = epoch + 1
                    network.save_Parameter(params)
                    network.save_Acc(params, mat_accuracy_G, mat_accuracy_D,
                                     mat_accuracy_C)

            # End of Training Run
            params['start_run'] = run + 1
            params['start_epoch'] = 0
            network.save_Parameter(params)

        # -------------------
        #  Post Run
        # -------------------

        acc_C_real = []

        for data in DL_V:

            XV, YV = data

            # # Generate Synthetic Data
            # Z = floatTensor(np.random.normal(0, 1, (YV.shape[0], params['noise_shape'])))
            # IV = torch.cat((Z,YV),dim=1)
            # XG = G(IV)

            # Classify Validation data
            PC = C(XV)
            acc_C_real.append(get_accuracy(PC, YV))

            if params['R_active']:
                if RF is None:
                    RF = R(XV)
                else:
                    RF = torch.cat((RF, R(XV).detach()), 0)

            if YF is None:
                YF = YV
                PF = PC
            else:
                YF = torch.cat((YF, YV), 0)
                PF = torch.cat((PF, PC), 0)

        mat_accuracy_C[run] = np.mean(acc_C_real)

        # -------------------
        #  Final prediction
        # -------------------

        if (params['prediction']):
            C.hard = False
            XP = pp.get_tensor(XTU, None)[0]
            YP = C(XP)
            Y_pred += YP.cpu().detach()
            C.hard = True

    # -------------------
    #  Post Training
    # -------------------

    timeline = np.arange(0, params['epochs'] + 1, params['save_step'])

    # -------------------
    #  Plot Accuracy
    # -------------------

    acc_G = np.mean(mat_accuracy_G, axis=0)
    std_G = np.std(mat_accuracy_G, axis=0)
    acc_D = np.mean(mat_accuracy_D, axis=0)
    std_D = np.std(mat_accuracy_D, axis=0)
    acc_C = np.mean(mat_accuracy_C, axis=0)
    std_C = np.std(mat_accuracy_C, axis=0)
    if params['R_active']:
        acc_R = np.mean(mat_accuracy_R, axis=0)

    fig, ax = plt.subplots()

    legend = []
    cmap = plt.get_cmap('gnuplot')
    indices = np.linspace(0, cmap.N, 7)
    colors = [cmap(int(i)) for i in indices]

    ax.plot(timeline, acc_C, c=colors[0], linestyle='solid')
    ax.fill_between(timeline,
                    acc_C - std_C,
                    acc_C + std_C,
                    alpha=0.3,
                    facecolor=colors[0])
    legend.append("Accuracy $A_C$")

    ax.plot(timeline, acc_D, c=colors[1], linestyle='dashed')
    ax.fill_between(timeline,
                    acc_D - std_D,
                    acc_D + std_D,
                    alpha=0.3,
                    facecolor=colors[1])
    legend.append("Accuracy $A_D$")

    ax.plot(timeline, acc_G, c=colors[2], linestyle='dotted')
    ax.fill_between(timeline,
                    acc_G - std_G,
                    acc_G + std_G,
                    alpha=0.3,
                    facecolor=colors[2])
    legend.append("Accuracy $A_G$")

    Y_max = 1.15
    if params['R_active']:
        ax.plot(timeline, acc_R, c=colors[3], linestyle='dashdot')
        legend.append("Accuracy $A_R$")

        perf = np.zeros_like(acc_C)
        perf[0] = 0.0
        perf[1:] = (acc_C[1:] - acc_R[1:]) / acc_R[1:]

        ax.plot(timeline, perf + 1, c=colors[4], linestyle='solid')
        legend.append("Performance $P_C$")

    ax.set_xlim(0.0, params['epochs'])
    ax.set_ylim(0.0, Y_max)

    ax.legend(legend, fontsize=20)
    ax.set_xlabel('Epoch', fontsize=20)
    ax.set_ylabel('Accuracy', fontsize=20)

    ax.grid()
    save_fig(params, 'eval', fig)

    # -------------------
    #  Compare Classifier to Baseline
    # -------------------

    if params['R_active']:
        maxC = np.argmax(acc_C, axis=0)
        bestC = acc_C[maxC]
        maxR = np.argmax(acc_R, axis=0)
        bestR = acc_R[maxR]
        log(' - Peak Accuracy: C: %s after %d epochs | R: %s after %d epochs | Inc: %s'
            % (format((bestC), '0.4f'), timeline[maxC], format(
                (bestR), '0.4f'), timeline[maxR],
               format((bestC - bestR) / bestR, '0.4f')),
            name='results')

        Y_max = max(Y_max, max(perf + 1) + 0.025)

        maxP = np.argmax(perf, axis=0)
        log(' - Highest $P_C$: %s after %d epochs.' % (format(
            (perf[maxP]), '0.4f'), timeline[maxP]),
            name='results')

        adva = np.zeros_like(acc_C)
        for i, v1 in enumerate(acc_C):
            for j, v2 in enumerate(acc_R):
                if v2 >= v1:
                    adva[i] = j - i
                    break

        maxA = np.argmax(adva, axis=0)
        log(' - Biggest Advantage: %d epochs after %d epochs.' %
            (adva[maxA] * params['save_step'], timeline[maxA]),
            name='results')

    # -------------------
    #  Log Results
    # -------------------

    if params['evaluate']:
        log(" - %s ( %s | %s ):  [C acc: %f ( ± %f )]" %
            (params['name'], params['dset_V'], params['location'], acc_C[-1],
             std_C[-1]),
            name='results')
    else:
        log(" - " + params['name'] +
            ": [C acc: %f ( ± %f )] [D acc: %f ( ± %f )] [G acc: %f ( ± %f )]"
            %
            (acc_C[-1], std_C[-1], acc_D[-1], std_D[-1], acc_G[-1], std_G[-1]),
            name='results')

    # -------------------
    #  Generate Confusion Matrix
    # -------------------

    YF = pp.one_hot_to_labels(params, YF)
    PF = pp.one_hot_to_labels(params, PF)

    con_mat = confusion_matrix(YF,
                               PF,
                               labels=None,
                               sample_weight=None,
                               normalize='true')
    if params['evaluate']:
        plot_confusion_matrix(con_mat,
                              params,
                              name='%s_%s' %
                              (params['dset_V'], params['location']),
                              title='Confusion matrix')
    else:
        plot_confusion_matrix(con_mat,
                              params,
                              name='C',
                              title='Confusion matrix')

    if params['R_active']:
        RF = pp.one_hot_to_labels(params, RF)
        con_mat = confusion_matrix(YF,
                                   RF,
                                   labels=None,
                                   sample_weight=None,
                                   normalize='true')
        plot_confusion_matrix(con_mat,
                              params,
                              name='R',
                              title='Confusion matrix')

    # -------------------
    #  Final prediction
    # -------------------

    if (params['prediction']):
        network.make_dir_pre()
        pred = torch.argmax(Y_pred, axis=1)
        with open(network.S_PATH + params['name'] + '_predictions.txt', 'w') as f:
            for y in pred:
                f.write(' '.join(['%.6f' % (float(y.item() + 1))] * 500) + '\n')


def reset():
    # Reset the module-level cache of encoded tasks and clear any cached state via nn.clear().
    global tasks_encoded
    tasks_encoded = {}
    nn.clear()
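
train_GAN() above relies on two helpers, get_accuracy() and get_accuracy_binary(), whose implementations are not part of this listing. The sketches below are assumptions about their behaviour, inferred from how they are called (class scores against one-hot targets, and thresholded discriminator outputs against 0/1 targets).

def get_accuracy(pred, target):
    # assumed: fraction of samples whose arg-max class matches the one-hot target
    return (pred.argmax(dim=1) == target.argmax(dim=1)).float().mean().item()

def get_accuracy_binary(pred, target):
    # assumed: fraction of discriminator outputs matching the 0/1 target after thresholding at 0.5
    return ((pred > 0.5).float() == target).float().mean().item()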