Example #1
def start():
    flag_main = True
    while flag_main:
        print("Comands:filter, analys, read_data,gen_data, exit")
        a = input("Enter command:").lower().replace(' ', '')
        if a == 'analys':
            candidates_votes()
            party_votes()
            candidate_votes_democrat()
            candidate_votes_republican()
            state_votes_democrat()
            state_votes_republican()
        elif a == 'filter':
            print(""" Filter
            1 - party
            2 - state
            3 - state_abbr
            4 - candidate
            5 - votes >
            6 - votes <
            7 - quit
            """)

            b = int(input("Enter command:"))
            if b == 7:
                continue  # back to the main menu
            elif b == 1:
                print("1 - Republican 2 - Democrat")
                c = int(input("Enter command:"))
                if c == 1:
                    filter_party("Republican")
                elif c == 2:
                    filter_party("Democrat")
            elif b == 2:
                c = input("Enter state:")
                filter_state(c)
            elif b == 3:
                c = input("Enter st_abbr:")
                filter_st_abbr(c)
            elif b == 4:
                c = input("Enter candidate:")
                filter_candidate(c)
            elif b == 5:
                c = int(input("Enter gt votes:"))
                filter_votes_gt(c)
            elif b == 6:
                c = int(input("Enter ls votes:"))
                filter_votes_ls(c)
            else:
                print("Error: unknown command number")

        elif a == "read_data":
            read_data()
        elif a == "gen_data":
            c = int(input("Enter count:"))
            generate_data(c)
        elif a == "exit":
            break
Example #2
def runsim(seed, tx_range, num_data, ROUND):
    random.seed(seed)
    sim = wsp.Simulator(timescale=0, until=50, terrain_size=(700, 700), visual=False)
    # sim = wsp.Simulator(timescale=0.1, until=50, terrain_size=(700, 700), visual=True)
    # place 100 nodes on 10x10 grid space
    nodes = generate_node(seed)
    for px, py in nodes:
        sim.add_node(PhaseI, (px, py))
    # sim.master = master
    sim.master = random.randint(0, 99)
    sim.tx_range = tx_range
    sim.run()

    sim2 = wsp.Simulator(timescale=0, until=50, terrain_size=(700, 700), visual=False)
    
    # copy data from PhaseI to PhaseII
    for n in sim.nodes:
        sim2.add_node(PhaseII, n.pos)
    for i in range(len(sim.nodes)):
        sim2.nodes[i].my_master = sim.nodes[i].my_master
        sim2.nodes[i].tx_range = sim.nodes[i].tx_range
        if sim2.nodes[i].id != sim.nodes[i].my_master:
            # non-master node: copy its stored previous-hop pointer
            sim2.nodes[i].prev = sim.nodes[i].prev
        else:
            # master node: carry over its collected P/I/T state
            sim2.nodes[i].P = sim.nodes[i].P
            sim2.nodes[i].I = sim.nodes[i].I
            sim2.nodes[i].T = sim.nodes[i].T
    sim2.source, _ = generate_data(seed, num_data, 99)
    sim2.ROUND = ROUND
    sim2.run()

    s1 = sum([n.send_packets for n in sim.nodes])
    s2 = sum([n.send_packets for n in sim2.nodes])
    return s1 + s2
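A minimal invocation sketch for runsim (values are illustrative; it assumes the module-level wsp, PhaseI, PhaseII, generate_node, and generate_data used above are in scope):

# Sweep a few transmission ranges for one seed; all values are illustrative.
for tx in (100, 150, 200):
    total_packets = runsim(seed=42, tx_range=tx, num_data=10, ROUND=1)
    print(tx, total_packets)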
Example #3
def save_dataset(trocar,
                 percentage,
                 choice,
                 sigma=5,
                 upper_bound=150,
                 N_lines=1000):

    lst = ['incorrect_data', 'noise', 'observed lines']
    data_path = '/home/bao/Downloads/Git/master_thesis/data'
    num_trocar = trocar.shape[0]

    if choice == lst[0]:
        list_noise_percentage = np.arange(start_range,
                                          end_range + step,
                                          step,
                                          dtype=np.uint8)
        pref = 'inc'
    elif choice == lst[1]:
        list_noise_percentage = np.arange(start_range + step,
                                          end_range + step,
                                          step,
                                          dtype=np.uint8)
        pref = 'sigma'
    else:
        list_noise_percentage = np.arange(start_range + step,
                                          end_range + step,
                                          step,
                                          dtype=np.uint8)
        list_noise_percentage = [
            element * 20 for element in list_noise_percentage
        ]
        pref = 'lines'

    path = os.path.join(data_path, pref)

    for num in list_noise_percentage:

        if choice == lst[0]:
            percentage[-1] = num / 100
        elif choice == lst[1]:
            sigma = num
        else:
            N_lines = num

        vect_start, vect_end, dict_gt = generate_data(N_lines=N_lines,
                                                      percentage=percentage,
                                                      trocar=trocar,
                                                      scale1=SCALE_COEF1,
                                                      scale2=SCALE_COEF2,
                                                      sigma=sigma,
                                                      upper_bound=upper_bound)

        file_name = pref + '_{:05d}'.format(num)

        np.savez(os.path.join(path, file_name + '.npz'),
                 vect_start=vect_start,
                 vect_end=vect_end)

        with open(os.path.join(path, file_name + '.pkl'), 'wb') as fp:
            pickle.dump(dict_gt, fp)
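Note that save_dataset also reads module-level names the snippet does not define (start_range, end_range, step, SCALE_COEF1, SCALE_COEF2, plus the os, np, and pickle imports). A hypothetical setup with placeholder values, not the author's actual configuration:

import os
import pickle

import numpy as np

# Assumed module-level constants (placeholder values).
start_range = 0      # first sweep value
end_range = 50       # last sweep value
step = 10            # increment between sweep points
SCALE_COEF1 = 1.0    # forwarded to generate_data as scale1
SCALE_COEF2 = 1.0    # forwarded to generate_data as scale2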
Example #4
def main():
    seed = 123
    tx_range = 100
    num_data = 1
    ROUND = 1
    random.seed(seed)
    sim = wsp.Simulator(timescale=0, until=50, terrain_size=(700, 700), visual=False)
    nodes = generate_node(seed)
    for px, py in nodes:
        sim.add_node(PhaseI, (px, py))
    # sim.master = master
    sim.master = random.randint(0, 99)
    sim.tx_range = tx_range
    sim.run()

    sim2 = wsp.Simulator(timescale=3, until=50, terrain_size=(700, 700), visual=True)
    
    # copy data from PhaseI to PhaseII
    for n in sim.nodes:
        sim2.add_node(PhaseII, n.pos)
    for i in range(len(sim.nodes)):
        sim2.nodes[i].my_master = sim.nodes[i].my_master
        sim2.nodes[i].tx_range = sim.nodes[i].tx_range
        if sim2.nodes[i].id != sim.nodes[i].my_master:
            # non-master node: copy its stored previous-hop pointer
            sim2.nodes[i].prev = sim.nodes[i].prev
        else:
            # master node: carry over its collected P/I/T state
            sim2.nodes[i].P = sim.nodes[i].P
            sim2.nodes[i].I = sim.nodes[i].I
            sim2.nodes[i].T = sim.nodes[i].T
    sim2.source, _ = generate_data(4, num_data, 99)
    sim2.ROUND = ROUND
    sim2.run()
Example #5
def simulate(config, households, speed):
    start = get_max_timestamp()
    while True:
        end = start + timedelta(minutes=1)  # note: the keyword is "minutes"
        sensor_data = {}
        for household_id in households:
            household = config[household_id - 1]
            # simulate one minute of data for this household
            sensor_data[household_id] = generate_data(household, start, end)
        # send the collected sensor data
        send_data(household_id, sensor_data)

        # advance the window by one minute
        start = end
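simulate leans on helpers defined elsewhere (get_max_timestamp, generate_data, send_data). A sketch of stand-in stubs for local testing, not the original implementations:

from datetime import datetime, timedelta

def get_max_timestamp():
    # Stand-in: start from the current minute boundary.
    return datetime.utcnow().replace(second=0, microsecond=0)

def send_data(household_id, sensor_data):
    # Stand-in: print instead of publishing to a real sink.
    print(household_id, len(sensor_data), "households simulated")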
Example #6
def start():
    read_data_at_start()
    print(
        "Hello, stranger! Welcome to COURSEWORK9000. Type 'help' to call the help menu. Enter the command:"
    )
    while True:
        inputString = input(">> ").lower().replace(' ', '')
        if inputString == 'help':
            print(
                "Available commands:\n"
                "type 'help' to show this help\n"
                "type 'read' to read the data from .csv files on the computer\n"
                "type 'getc' to get all of the characteristicvar descriptions\n"
                "type 'getm' to get all of the measurementvar descriptions\n"
                "type 'generate' to generate some random data\n"
                "type 'filter' to filter data\n"
                "type 'analyse' to run the analyst functions\n"
                "type 'truncate' to truncate the database tables\n"
                "type 'exit' to exit")
        elif inputString == 'analyse':
            run_the_analysis()
        elif inputString == 'filter':
            filter_main()
        elif inputString == "read":
            read_data_fast()
        elif inputString == "generate":
            count = int(input("Enter the count\n>> "))
            generate_data(count)
        elif inputString == "truncate":
            truncate_db()
        elif inputString == 'getc':
            read_characteristicvars_table_all()
        elif inputString == 'getm':
            read_measurementvars_table_all()
        elif inputString == "exit":
            break
Example #7
def plot_data():
    # Dataset and labels
    X, y = generate_data()

    x_0, y_0, x_1, y_1 = [], [], [], []

    for i in range(len(X)):
        if y[i] == 0:
            x_0.append(X[i][0])
            y_0.append(X[i][1])
        elif y[i] == 1:
            x_1.append(X[i][0])
            y_1.append(X[i][1])

    plt.scatter(x_0, y_0, c="r", alpha=0.5)
    plt.scatter(x_1, y_1, c="b", alpha=0.5)

    plt.xlabel("x")
    plt.ylabel("y")
    plt.title("Data generated by generate_data()")
    plt.savefig("data.png")
Example #8
def main():
    #num_days = 10000
    #attack = np.zeros(num_days)
    ga = generate_data()
    date = pd.date_range(start=start_date, end=end_date)

    fp_temp = []
    holiday_data = []
    full_moons = ga.all_fm
    holidays = ga.holidays
    full_moon_array = np.zeros(len(date))
    holiday_array = np.zeros(len(date))
    temp_1 = start_date
    #print(int(start_date in all_full_moon))
    index = 0
    #print(all_full_moon)
    while temp_1 <= end_date:
        full_moon_array[index] = int(temp_1 in full_moons)
        holiday_array[index] = int(temp_1 in holidays)
        ga.loneWolf_attack()
        ga.set_holi_attack_fact(temp_1)
        ga.set_fm_attack_fact(temp_1)
        ga.set_rp_attack_fact()
        ga.set_tg_casualities()
        fp_temp.append(ga.tg_casualities + ga.total_lw_attack)
        temp_1 += dt.timedelta(1)
        index += 1
    #print((full_moon_array))
    #print(len(pd.date_range(start=start_date,end=end_date)))
    attack_df = pd.DataFrame({
        'Attacks': fp_temp,
        'Date': date,
    })
    #'Full_moons' : full_moon_array,
    #'Holidays': holiday_array})
    #   print(attack_df.head(10))
    attack_df.to_csv('../ML_predictor/1_complex_finetuned.csv')
    plt.plot(fp_temp)
Example #9
    # complete the symmetric 3x3 normal-equation matrix
    A[2, 0] = A[0, 2]
    A[2, 1] = A[1, 2]
    A[2, 2] = label.shape[0]

    B = np.zeros([3])
    B[0] = np.sum(np.dot(data[:, 0], label))
    B[1] = np.sum(np.dot(data[:, 1], label))
    B[2] = np.sum(label)

    # least-squares fit: solve A @ [w1, w2, b] = B for a linear decision function
    [w1, w2, b] = np.linalg.solve(A, B)

    return lambda x: np.sign(w1 * x[:, 0] + w2 * x[:, 1] + b)


if __name__ == '__main__':
    data, label = generate_data(data_num, data_num1, data_num2)

    weight = np.ones(data_num) * (1. / data_num)

    def f(x):
        return 0

    plt.figure(figsize=(10, 10))
    err = []

    for i in range(200):  # main loop; a rule to end it early should be added
        s_data, s_label = sample_data(data, label, weight, data_num)
        new_f = select_f(s_data, s_label)
        e = np.sum((new_f(data) != label) * weight)
Example #10
"""
   main.py

   Main file to run with the Python interpreter.  Generates some test data and runs it through our "polyamorous" Gale Shapley algorithm.
"""

from generate_data import *
from polygs import *

# Generate buyer & seller data.
# Optionally, pass poly=False to default to the standard Gale-Shapley algorithm.
buyers, sellers = generate_data(num_buyers=5, poly=True)

print "\nMatching", len(buyers), "buyers with", len(sellers), "sellers."

# Display the generated data.
print "\nBuyer data:"
for buyer in buyers:
    print "ID:", buyer, "-", buyers[buyer]
print "\nSeller data:"
for seller in sellers:
    print "ID:", seller, "-", sellers[seller]

# Run the algorithm.
matches = polyGS(buyers, sellers)

# Display the resulting matching.
print "\nMatches (in format (buyer, seller) ):"
for match in matches:
    print match
Example #11
   print(model.summary())
   # Render the network graphically
   plot_model(model, to_file='multiple_inputs.png')

   # Create the Adam optimizer
   adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=1.25)
   parallel_model = multi_gpu_model(model,gpus=2)
   # Compile the model
   parallel_model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['mae','accuracy'])
   
   print(model.output_shape)
   # model.fit([X_fija_29,X_mov_29,X_fija_27,X_mov_27],Y_train, batch_size=100, epochs=20,
   #           verbose=2, validation_data=(X_test, Y_test))

   #for iteraciones in range(0,1000):
   DVF = generate_data()
   DVF.generate_dvf()
   for i in range(0,20):

      patches_train = get_patches(sitk.ReadImage("brain.nii"),100)
      [X_27_moving_train, X_29_moving_train, X_27_fixed_train, X_29_fixed_train, Y_train] = patches_train.patches()
      # print('maximo',np.max(X_27_moving_train))
      # print('maximo', np.max(X_29_moving_train))
      # print('maximo', np.max(X_27_fixed_train))
      # print('maximo', np.max(X_29_fixed_train))

      #print((patches.Y.shape))
      patches_test = get_patches(sitk.ReadImage("brain.nii"), 25)
      [X_27_moving_test, X_29_moving_test, X_27_fixed_test, X_29_fixed_test, Y_test] = patches_test.patches()
      # print('maximo', np.max(X_27_moving_test))
      # print('maximo', np.max(X_29_moving_test))
Example #12
        errors.append(error(tree, test_data))
    erros_main.append(np.mean(errors))
plt.plot(Ms, erros_main)
plt.xlabel("M")
plt.ylabel("Average_error (%)")
"""

# Q2
Ms = np.arange(10000, 150000, 25000)
irrelevant = []
for m in Ms:
    irrels = []
    for rep in range(15):
        irrel = []
        sys.stdout.write("\r m:" + str(m) + " rep:" + str(rep))
        train_data = generate_data(m)
        tree = decision_tree(21, train_data, [], s=1500)
        stack = [tree]
        while stack:
            popped = stack.pop()
            if popped[0] != "p":
                stack.append(popped[1][0])
                stack.append(popped[1][1])
                if popped[0] > 14:
                    irrel = set(irrel).union(set([popped[0]]))

        irrels.append(len(irrel))
    irrelevant.append(np.mean(irrels))
    print(irrelevant)
    print("\n")
plt.plot(Ms, irrelevant)
Example #13
    sim.ROUND = ROUND
    # place nodes over 100x100 grids
    nodes = generate_node(seed)
    for px, py in nodes:
        node = sim.add_node(MyNode, (px, py))
        node.tx_range = tx_range
        node.logging = True

    # start the simulation
    sim.run()

    packets = sum([n.send_packets for n in sim.nodes])
    return packets


# runsim(0, 1, 99, 300)

seed = int(sys.argv[1])
with open(f"results_aodv_{seed}.csv", "w") as out:
    writer = csv.writer(out)
    writer.writerow(['seed', 'range', 'num_data', 'packets'])
    for n in range(5, 101, 5):
        _, data = generate_data(seed, n, 99)
        for j in range(5):
            RANGE = 50 + (j + 1) * 50
            print(f"RUNNING...{seed}, {RANGE}, {n}")
            packets = 0
            for u, v in data:
                packets += runsim(seed, u, v, RANGE, 1)
            writer.writerow([seed, RANGE, n, packets])
Example #14
# Imports
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import os
from scipy.ndimage import filters
import matplotlib.cbook as cbook
from pylab import *

from generate_data import *

plt.switch_backend('agg')

# Dataset and labels
X, y = generate_data()

K = []
performance = []

# Run kNN with increasing K
for n in range(1, len(y)):
    neigh = KNeighborsClassifier(n)
    neigh.fit(X, y)

    prediction = neigh.predict(X)

    total = len(prediction)
    correct = sum(1 - np.absolute(y - prediction))

    K += [n]
Example #15
if __name__ == '__main__':
    if process == 'generate_marginal_table':
        raw_data = pd.read_csv('data/dataset', header=None)
        attrs_list = generate_attribute_list(
            num_attribute, size_marginal_tables,
            num_marginal_tables)  # need consistency
        marginal_list = buil_marginal_table(attrs_list,
                                            raw_data,
                                            epsilon,
                                            is_noise=False)
        with open('data/marginal_table_no_noise.pickle', 'wb') as f:
            pickle.dump(marginal_list, f)

    elif process == 'generate_data':
        filename = 'data/marginal_table_10_laplace.pickle'
        data = load_marginal_table(filename)
        all_marginal_attrs = []
        for i in range(len(data)):
            all_marginal_attrs.append(data[i]['attrs'])
        sorted_list = sort_list(all_marginal_attrs)  # sort the list

        all_record = []
        for j in range(num_generate_data):
            record = generate_data(sorted_list, data, num_attribute)
            all_record.append(record)
        result = pd.DataFrame(all_record, columns=range(num_attribute))
        result = result.astype(int)

        result.to_csv('data/result_10_laplace.csv')
Example #16
from generate_data import *
#generate data
generate_data(n=100, file_prefix="train")

#load data
"""
(train_data, train_label) = load_data(file_prefix="train")
print(train_data)
print(train_label)
"""
Example #17
                self.params['W1'] -= learning_rate * dW1
                self.params['W2'] -= learning_rate * dW2
                self.params['W3'] -= learning_rate * dW3
                self.params['b1'] -= learning_rate * db1
                self.params['b2'] -= learning_rate * db2
                self.params['b3'] -= learning_rate * db3
        return loss_history

    def predict(self, X):
        hidden1 = np.maximum(0,
                             np.dot(X, self.params['W1']) + self.params['b1'])
        hidden2 = np.maximum(0,
                             np.dot(hidden1, self.params['W2']) + self.params['b2'])
        # the bias must be added after the matrix product, not inside np.dot
        scores = np.dot(hidden2, self.params['W3']) + self.params['b3']
        y_pred = np.argmax(scores, axis=1)
        return y_pred


if __name__ == "__main__":
    X1, Y1 = generate_data()
    x_train, y_train, x_test, y_test = data_split(X1, Y1)
    # data = batch(x_train, y_train)
    model = ThreeLayerFC()
    loss_history = model.train(x_train, y_train, epoch=10)
    y_pred = model.predict(x_test)
    print("accuracy: %.2f " % (np.mean(y_pred == y_test) * 100), "%")
    plt.plot(loss_history)
    plt.show()
Example #18
from generate_data import *
from decision_trees import *

import numpy as np
import math
import matplotlib.pyplot as plt

if __name__ == "__main__":
    k = 4
    m1 = 100
    train_data = generate_data(k, m1)
    #print("\n\nTrain Data: \n")
    #print(train_data)

    tree1, train_error = train_model(k, train_data)
    #print(tree)
    print("\n Train error:")
    print(train_error)

    #Q3
    m2 = 1000
    test_data = generate_data(k, m2)
    print("\n Q3. Test error:")
    print(error(tree1, test_data))
    """
    print("\n Decision Tree: \n")
    tree2, train_error=train_model(k,train_data,ig=False)
    print("\n Train error:")
    print(train_error)
    
    print("\n Q3. Test error:")
Example #19
def main():
    # generate the waste data only if it has not been already generated
    if not (os.path.exists('n.txt') and os.path.exists('avg_waste.txt')):
        generate_data()
    # plot data
    generate_plots('n.txt', 'avg_waste.txt')
Example #20
def main():
    if not (os.path.exists('as_times.txt') and os.path.exists('rand_times.txt') and os.path.exists('N.txt')):
        generate_data()
    generate_plots('N.txt', 'rand_times.txt', 'as_times.txt')
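Examples #19 and #20 share the same idiom: run the expensive generate_data() only when its output files are missing, then plot. A generalized sketch of that caching pattern (the helper name ensure_data is hypothetical):

import os

def ensure_data(paths, generator):
    # Invoke the generator only if any expected output file is absent.
    if not all(os.path.exists(p) for p in paths):
        generator()

# Usage, mirroring Example #20:
# ensure_data(['N.txt', 'rand_times.txt', 'as_times.txt'], generate_data)
# generate_plots('N.txt', 'rand_times.txt', 'as_times.txt')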