Example #1
def netparam(name, filt, tau, nbclust, sigma, homeinv, jitter, timestr, dataset, R, nb_learn=10, maxevts=None, ds_ev=None, jitonic=[None, None], verbose=False):
    if verbose:
        print(f'The dataset used is: {dataset}')
    # The four network flavours differ only in homeostasis, the
    # homeostasis-at-test flag, and the kernel initialization.
    if name == 'hots':
        homeo, homeotest, krnlinit = False, False, 'first'
    elif name == 'homhots':
        homeo, homeotest, krnlinit = True, False, 'rdn'
    elif name == 'fullhom':
        homeo, homeotest, krnlinit = True, True, 'rdn'
    elif name == 'onlyonline':
        homeo, homeotest, krnlinit = False, False, 'rdn'
    else:
        raise ValueError(f'unknown network name: {name}')
    hotshom = network(krnlinit=krnlinit, filt=filt, tau=tau, R=R, nbclust=nbclust, homeo=homeo, sigma=sigma, homeinv=homeinv, jitter=jitter, timestr=timestr)
    # 'hots' learns its prototypes one digit at a time; the other flavours
    # learn on the whole training stream at once.
    if name == 'hots':
        hotshom = hotshom.learning1by1(dataset=dataset, nb_digit=nb_learn, maxevts=maxevts, ds_ev=ds_ev, jitonic=jitonic, verbose=verbose)
    else:
        hotshom = hotshom.learningall(dataset=dataset, nb_digit=nb_learn, maxevts=maxevts, ds_ev=ds_ev, jitonic=jitonic, verbose=verbose)
    return hotshom, homeotest
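A hedged usage sketch for netparam follows; every argument value below is an illustrative placeholder, not a value from the original project.
# Usage sketch (placeholder values only):
hotshom, homeotest = netparam(name='homhots', filt=2, tau=5, nbclust=4,
                              sigma=None, homeinv=False, jitter=False,
                              timestr='2021-03-29', dataset='nmnist', R=2,
                              nb_learn=10, verbose=True)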
Example #2
from time import time

import pandas as pd


def train_optimized(X_train, y_train):
    #Using the optimized hyperparameters found, trains a network and returns it
    #Initialize the network
    input_neurons = len(X_train[:, 0])
    output_neurons = 1
    net = network((input_neurons, 100, output_neurons), smoosh_weights=False)
    batch_size = 5
    #Test if setting biases to 1 helps
    #net.set_bias()
    #Set number of training epochs
    number_of_epochs = 50
    #Set the learning rate
    net.learning_rate = 1
    #Set regularization
    net.L2 = 0
    t0 = time()
    plot_e = list()
    plot_acc = list()
    plot_TP = list()
    plot_bacc = list()
    for e in range(number_of_epochs):
        net.metric_acc, net.metric_con = net.test(X_test, y_test)
        #Keep track of training progress and time usage
        print("\rRunning time after {} epoch: {}".format(e,
                                                         time() - t0),
              end="",
              flush=True)
        #Create random index vector
        k = net.make_batch_index(len(X_train[0, :]))
        #Calculate number of batches and loop through
        for i in range(len(k) // batch_size):
            batch = k[i * batch_size:(i + 1) * batch_size]
            net.train(X_train[:, batch], y_train[batch])
        bacc = net.metric_con[0, 0] / (net.metric_con[0, 0] +
                                       net.metric_con[1, 0])
        bacc += net.metric_con[1, 1] / (net.metric_con[1, 1] +
                                        net.metric_con[0, 1])
        bacc = bacc / 2
        plot_e.append(e)
        plot_acc.append(net.metric_acc)
        plot_TP.append(net.metric_con[0, 0])
        plot_bacc.append(bacc)
    data = {
        "ACC": plot_acc,
        "BACC": plot_bacc,
        "TP": plot_TP,
        "Epochs": plot_e
    }
    net.history = pd.DataFrame(data)
    return net
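A hedged usage sketch; it assumes the features-by-samples data layout implied by the indexing above and that X_test and y_test are defined at module level, as train_optimized requires.
# Usage sketch (assumes module-level X_test / y_test):
net = train_optimized(X_train, y_train)
print()  # finish the in-place progress line
print(net.history.tail())  # ACC/BACC/TP for the last epochs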
Example #3
import numpy


def readFromMnist():
    input_nodes = 784
    hidden_nodes = 300
    output_nodes = 10
    learning_rate = 0.2

    n = network(input_nodes, hidden_nodes, output_nodes, learning_rate)

    training_data_file = open("mnist_dataset/mnist_train_100.csv", "r")
    training_data_list = training_data_file.readlines()
    training_data_file.close()

    epochs = 5

    for e in range(epochs):
        for record in training_data_list:
            all_values = record.split(',')
            inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
            targets = numpy.zeros(output_nodes) + 0.01
            targets[int(all_values[0])] = 0.99
            n.train(inputs, targets)

    test_data_file = open("mnist_dataset/mnist_test_10.csv", 'r')
    test_data_list = test_data_file.readlines()
    test_data_file.close()

    scorecard = []
    for record in test_data_list:
        all_values = record.split(',')
        correct_label = int(all_values[0])
        inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
        outputs = n.query(inputs)
        label = numpy.argmax(outputs)
        if label == correct_label:
            scorecard.append(1)
        else:
            scorecard.append(0)

    scorecard_array = numpy.asarray(scorecard)
    print(scorecard)
    print("performance = ", scorecard_array.sum() / scorecard_array.size)
Example #4
from time import time

import numpy as np
import pandas as pd


def fit_franke():
    #Uses the optimized hyperparameters found with the grid_search
    #function and tests performance against the validation set
    #Initialize the network
    input_neurons = len(X2[0, :])
    output_neurons = 1
    net = network((input_neurons, 100, output_neurons))
    net.cost_function = "square"
    net.fit_function = True
    #Set batch size
    batch_size = 5
    #Set number of training epochs
    number_of_epochs = 500
    #Set the learning rate
    net.learning_rate = 1
    #Set regularization
    net.L2 = 0

    results_mse = list()
    results_r2 = list()
    epochs = np.arange(number_of_epochs)
    #Create time stamp to track calculation times
    t0 = time()
    for e in range(number_of_epochs):
        #Keep track of training progress and time usage
        print("\rRunning time after {} epoch: {}".format(e,
                                                         time() - t0),
              end="",
              flush=True)
        #Create random index vector
        k = net.make_batch_index(len(X_train[0, :]))
        #Calculate number of batches and loop through
        for i in range(len(k) // batch_size):
            batch = k[i * batch_size:(i + 1) * batch_size]
            net.train(X_train[:, batch], y_train[batch])
        #Calculate results for current settings
        z = net.test(X2.T, true2)
        r2 = R2(true2, z[-1])
        mse = MSE(true2, z[-1])
        results_mse.append(mse)
        results_r2.append(r2)

    data = {"MSE": results_mse, "R2": results_r2, "Number epochs": epochs}
    return pd.DataFrame(data)
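A hedged usage sketch; it relies on the module-level X2, true2, X_train and y_train that fit_franke reads.
# Usage sketch (assumes the module-level data used inside fit_franke):
history = fit_franke()
print()  # finish the in-place progress line
print(history.sort_values("MSE").head(1))  # epoch with the lowest validation MSE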
Example #5
import argparse

import torch

parser = argparse.ArgumentParser(description='PyTorch CornerNet Demo')
parser.add_argument('--conf_thres',
                    default=0.01,
                    type=float,
                    help='object threshold')
parser.add_argument('--nms_thres',
                    type=float,
                    default=0.3,
                    help='iou threshold')
parser.add_argument('--test_path',
                    type=str,
                    default=r'E:\datasets\test',
                    help='directory of test images')
args = parser.parse_args()
print(args)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

net = network(config,
              lr=None,
              resume=True,
              device=device,
              train_loader=None,
              mode='demo')

net.simple_demo(imgs_dir=args.test_path,
                conf_thres=args.conf_thres,
                nms_thres=args.nms_thres,
                num_dets=1000,
                ae_threshold=0.1)
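Assuming this snippet is saved as a script named demo.py (a placeholder name), it could be run as follows; the threshold values are illustrative:
python demo.py --conf_thres 0.05 --nms_thres 0.5 --test_path E:\datasets\test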
Example #6
Glat, Glon, GType, Gnum, Gedge = latlontypenumedge(GN, GE, dt.gasname1,
                                                   dt.gasname2)

plt.figure(figsize=(20, 12))
Base = bm.BaseMapSet(dt.Type1, dt.llon, dt.rlon, dt.llat, dt.rlat)
WX, WY = latlon2XY(Wlat, Wlon, Base)
PX, PY = latlon2XY(Plat, Plon, Base)
GX, GY = latlon2XY(Glat, Glon, Base)

Wsupply, Wtransmission, Wdemand = supplytrandemandnum(WType, dt.water1para,
                                                      Wnum)
Psupply, Ptransmission, Pdemand = supplytrandemandnum(PType, dt.power1para,
                                                      Pnum)
Gsupply, Gtransmission, Gdemand = supplytrandemandnum(GType, dt.gas1para, Gnum)

Shelby_Water = network(dt.water0para, Geox, Geoy)
Shelby_Power = network(dt.power0para, Geox, Geoy)
Shelby_Gas = network(dt.gas0para, Geox, Geoy)

Shelby_Water.x, Shelby_Water.y, Shelby_Water.Type = WX, WY, WType
Shelby_Power.x, Shelby_Power.y, Shelby_Power.Type = PX, PY, PType
Shelby_Gas.x, Shelby_Gas.y, Shelby_Gas.Type = GX, GY, GType

supplytrandemandxy(Shelby_Water)
supplytrandemandxy(Shelby_Power)
supplytrandemandxy(Shelby_Gas)

ShelbyNetwork = [Shelby_Water, Shelby_Power, Shelby_Gas]
edge = [Wedge, Pedge, Gedge]

for i in range(len(ShelbyNetwork)):
    pass  # ... (loop body truncated in the original example)
Example #7
print(args)

if len(config['gpu_ids']) == 1:
    os.environ["CUDA_VISIBLE_DEVICES"] = str(config['gpu_ids'][0])

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


# Data
print('==> Preparing data...')
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])

dataset = ListDataset(config['root'], config['train_root'],
                      img_size=config['image_size'], fmp_size=config['fms_size'],
                      classes=config['num_classes'], train=True, transform=transform)
train_loader = torch.utils.data.DataLoader(dataset, batch_size=args.batch_size,
                                           shuffle=True, collate_fn=dataset.collate_fn)
net = network(config, lr=args.lr, resume=args.resume, device=device, train_loader=train_loader)

if __name__ == "__main__":
    net.train()
Example #8
from time import time

import pandas as pd


def grid_search(X_train,
                X_test,
                y_train,
                y_test,
                learning_rates,
                lambdas,
                hidden,
                n_epochs=[5],
                b_sizes=[10]):
    #Runs through a range of hyperparameters and returns a dataframe
    #with accuracy for every tested combination
    etas = list()
    L2 = list()
    neurons = list()
    results_accuracy = list()
    results_bal_accuracy = list()
    training_results_accuracy = list()
    training_results_bal_accuracy = list()
    epochs = list()
    batch_sizes = list()
    #Create time stamp to track calculation times
    t0 = time()
    for eta in learning_rates:
        for lmd in lambdas:
            for h in hidden:
                for sizes in b_sizes:
                    for eps in n_epochs:
                        #Initialize network
                        input_neurons = len(X_train[:, 0])
                        print(input_neurons)
                        output_neurons = 1
                        net = network((input_neurons, h, output_neurons),
                                      smoosh_weights=False)
                        batch_size = sizes
                        #Set number of training epochs
                        number_of_epochs = eps
                        #Set the learning rate
                        net.learning_rate = eta
                        #Set regularization
                        net.L2 = lmd
                        #Set biases to 1
                        #net.set_bias()
                        print("LR: {} Hidden:{} L2:{}".format(eta, h, lmd))
                        for e in range(number_of_epochs):
                            #Keep track of training progress and time usage
                            print("\rRunning time after {} epoch: {}".format(
                                e,
                                time() - t0),
                                  end="",
                                  flush=True)
                            #Create random index vector
                            k = net.make_batch_index(len(X_train[0, :]))
                            #Calculate number of batches and loop through
                            for i in range(len(k) // batch_size):
                                batch = k[i * batch_size:(i + 1) * batch_size]
                                net.train(X_train[:, batch], y_train[batch])
                        #Evaluate the trained network on the testing set
                        accuracy, confusion = net.test(X_test, y_test)
                        TP = confusion[0, 0]
                        TN = confusion[1, 1]
                        FP = confusion[0, 1]
                        FN = confusion[1, 0]
                        bal_accuracy = ((TP / (TP + FN)) + (TN /
                                                            (TN + FP))) / 2
                        #Store data in preparation for building the dataframe
                        results_accuracy.append(accuracy)
                        results_bal_accuracy.append(bal_accuracy)
                        etas.append(eta)
                        L2.append(lmd)
                        neurons.append(h)
                        epochs.append(eps)
                        batch_sizes.append(sizes)
                        #On training set
                        accuracy, confusion = net.test(X_train, y_train)
                        TP = confusion[0, 0]
                        TN = confusion[1, 1]
                        FP = confusion[0, 1]
                        FN = confusion[1, 0]
                        bal_accuracy = ((TP / (TP + FN)) + (TN /
                                                            (TN + FP))) / 2
                        training_results_accuracy.append(accuracy)
                        training_results_bal_accuracy.append(bal_accuracy)

    data = {
        "ACC": results_accuracy,
        "BACC": results_bal_accuracy,
        "tACC": training_results_accuracy,
        "tBACC": training_results_bal_accuracy,
        "Learning rate": etas,
        "L2 factor": L2,
        "Hidden Neurons": neurons,
        "Batch size": batch_sizes,
        "Number epochs": epochs
    }
    return pd.DataFrame(data)
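A hedged usage sketch for this grid_search; the hyperparameter grids below are illustrative placeholders.
# Usage sketch (placeholder grids):
results = grid_search(X_train, X_test, y_train, y_test,
                      learning_rates=[0.1, 1.0], lambdas=[0.0, 0.001],
                      hidden=[50, 100], n_epochs=[5], b_sizes=[10])
print(results.sort_values("BACC", ascending=False).head())  # best balanced accuracy first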
Example #9
topo_eff1, topo_eff2 = [[], [], []], [[], [], []]  # appended to in the loop below
eff1, eff2 = [[], [], []], [[], [], []]
cluster_coeff1, cluster_coeff2 = [[], [], []], [[], [], []]
topodiameter1, topodiameter2 = [[], [], []], [[], [], []]
diameter1, diameter2 = [[], [], []], [[], [], []]
cost1, cost2 = [[], [], []], [[], [], []]
degree1, degree2 = [[], [], []], [[], [], []]
#----------------------------------------------------Network initialization
Temp = 0
while(Temp <= 50):
    Water = network(dt.water1para, Geox, Geoy)
    Power = network(dt.power1para, Geox, Geoy)
    Gas = network(dt.gas1para, Geox, Geoy)
    
    Network_obj = [Water, Power, Gas]
    
    ##For each of three networks: Water, Power, Gas
    for i in range(len(Network_obj)):
        #Decision of facility location of three networks
        Network = Network_obj[i]
        Network.network_setup(Tract_pop, Tractx, Tracty, i, lon, lat, dt.cnum)
        
        topo_eff1[i].append(Network.topo_efficiency)
        eff1[i].append(Network.efficiency)
        cluster_coeff1[i].append(Network.cluster_coeff)
        topodiameter1[i].append(Network.topodiameter)
        # ... (remainder of the loop body truncated in the original example)
Example #10
from time import time

import pandas as pd


def grid_search(learning_rates, lambdas, hidden, n_epochs=[5], b_sizes=[10]):
    #Runs through a range of hyperparameters and returns a dataframe
    #with MSE/R2 scores for every tested combination
    etas = list()
    L2 = list()
    neurons = list()
    results_mse = list()
    results_r2 = list()
    epochs = list()
    batch_sizes = list()
    #Create time stamp to track calculation times
    t0 = time()
    #To keep track of progress
    counter = 0
    goal = len(learning_rates) * len(lambdas) * len(hidden) * len(
        n_epochs) * len(b_sizes)
    for eta in learning_rates:
        for lmd in lambdas:
            for h in hidden:
                for sizes in b_sizes:
                    for eps in n_epochs:
                        #Initialize network
                        input_neurons = len(X[0, :])
                        output_neurons = 1
                        net = network((input_neurons, h, output_neurons))
                        net.cost_function = "square"
                        net.fit_function = True
                        batch_size = sizes
                        #Set number of training epochs
                        number_of_epochs = eps
                        #Set the learning rate
                        net.learning_rate = eta
                        #Set regularization
                        net.L2 = lmd
                        counter += 1
                        #Keep track of training progress and time usage
                        print("\rRunning time: {:.1f} Iteration: {}/{}".format(
                            time() - t0, counter, goal),
                              end="",
                              flush=True)
                        for e in range(number_of_epochs):
                            #Create random index vector
                            k = net.make_batch_index(len(X_train[0, :]))
                            #Calculate number of batches and loop through
                            for i in range(len(k) // batch_size):
                                batch = k[i * batch_size:(i + 1) * batch_size]
                                net.train(X_train[:, batch], y_train[batch])
                        #Calculate results for current settings
                        z = net.test(X.T, true)
                        r2 = R2(true, z[-1])
                        mse = MSE(true, z[-1])
                        results_mse.append(mse)
                        results_r2.append(r2)
                        etas.append(eta)
                        L2.append(lmd)
                        neurons.append(h)
                        epochs.append(eps)
                        batch_sizes.append(sizes)

    data = {
        "MSE": results_mse,
        "R2": results_r2,
        "Learning rate": etas,
        "L2 factor": L2,
        "Hidden Neurons": neurons,
        "Batch size": batch_sizes,
        "Number epochs": epochs
    }
    return pd.DataFrame(data)
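A hedged usage sketch for the regression grid_search; the grids are illustrative, and the best combination is the row with the lowest MSE.
# Usage sketch (placeholder grids):
results = grid_search(learning_rates=[0.1, 1.0], lambdas=[0.0, 0.001], hidden=[50, 100])
print(results.sort_values("MSE").iloc[0])  # hyperparameters with the lowest MSE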
Example #11

import pygame as pg

pg.init()  # Initialize the pygame library
clock = pg.time.Clock()  # Setting the clock

screen_height, screen_length = 600, 1100  # These variables are used by the other modules
screen = pg.display.set_mode(
    (screen_length, screen_height))  # Creating a window

pg.display.set_caption("Tanks", "Spine Runtime")  # Set window caption
pg.display.set_icon(
    pg.image.load("images/tankico.ico"))  # Set window icon image

running = True

n = network()
data_to_send = n.getP()
received_data = n.send(
    data_to_send)  # First connection made, gives back the stats data to be set
print(received_data)
bullets_group = pg.sprite.Group()  # The tank's bullets that got fired

player = tank((200, 200))  # Create the player's tank at its starting position

###########
bars_and_panels = pg.sprite.Group()
bars_and_panels.add(money_bar())
bars_and_panels.add(health_bar())
###########
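The snippet ends before the game's main loop; a hedged sketch of how the pieces above are typically driven in pygame (the frame rate and quit handling are illustrative):
# Hedged main-loop sketch; 60 FPS and the quit handling are illustrative.
while running:
    for event in pg.event.get():
        if event.type == pg.QUIT:
            running = False
    screen.fill((0, 0, 0))
    bullets_group.update()
    bars_and_panels.draw(screen)
    pg.display.flip()
    clock.tick(60)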
Example #12
import numpy as np

# The input nodes must be defined first
inputnodes = 784
"""=======================================================CODE======================================================="""


def transformAnswer(antwortMatrix):
    index = Helper.getIndexOfMaxValue(antwortMatrix)
    return index


np.set_printoptions(threshold=np.inf)  # needed to print the matrices in full

erg = pngWandler(path).openPictures()
images = []
targets = []
j = 0
nw = network(inputnodes, 300, 10, 0.3)
# Train for three passes; i is reset each pass so every image/target
# pair in erg is used again.
while j < 3:
    i = 0
    for obj in erg:
        if i + 1 < len(erg):
            nw.train(erg[i + 1], erg[i])
        i += 2
    j += 1
eingabe = Eingabe(path, nw)
right, wrong = Helper.testNetwork(nw, path)
print(right)
print(wrong)
Helper.readFromMnist()
Example #13
args = parser.parse_args()
checkpoint_dir = os.path.join(args.checkpoint_dir, 'fold_%d' % args.fold)
print(category[args.fold])

#set gpus
# gpu_list = [int(x) for x in args.gpu.split(',')]
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

torch.backends.cudnn.benchmark = True
cudnn.enabled = True

# Create network.
model = network()

# Load ResNet-50 pretrained parameters
model = load_resnet_param(model, stop_layer='fc', layer_num=50)
model = nn.DataParallel(model, [0, 1])

# Disable the gradients of layers that are not optimized
turn_off(model)

if not os.path.exists(checkpoint_dir):
    os.makedirs(checkpoint_dir)


trainset = dataset_train(data_dir=args.data_dir, fold=args.fold, input_size=args.input_size)
trainloader = data.DataLoader(trainset, batch_size=args.train_batch_size, shuffle=True, num_workers=4)
Example #14
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])
dataset = ListDataset(config['train_root'],
                      img_size=config['image_size'],
                      fmp_size=config['fms_size'],
                      classes=config['num_classes'],
                      train=True,
                      transform=transform)
trainloader = torch.utils.data.DataLoader(dataset,
                                          batch_size=args.batch_size,
                                          shuffle=True,
                                          collate_fn=dataset.collate_fn)

net = network(config, lr=args.lr, resume=args.resume, device=device)


def train(epoch):
    train_loss = 0.
    for idx, ip_dict in enumerate(trainloader):
        images = ip_dict['inputs'].to(device)
        targets = [t.to(device) for t in ip_dict['targets']]

        loss, log_loss = net(images, targets)

        train_loss += loss.item()
        print(
            '[Epoch %d, Batch %d/%d] [totalLoss:%.6f] [ht_loss:%.6f, off_loss:%.6f, pull_loss:%.6f, push_loss:%.6f]'
            % (epoch, idx, len(trainloader), loss.item(), log_loss[0],
               log_loss[1], log_loss[2], log_loss[3]))
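A hedged driver for the train function above; the epoch count is an illustrative placeholder.
# Hedged driver sketch: the number of epochs is an illustrative placeholder.
if __name__ == "__main__":
    for epoch in range(50):
        train(epoch)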