Example #1
# Assumed imports for this fragment: torch, torch.nn as nn, torch.optim as optim,
# plus project helpers FNN_ID, data_loaders, train, test, save.
def main():
    torch.manual_seed(43)

    criterion = nn.CrossEntropyLoss()
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_loader, test_loader = data_loaders()

    model = FNN_ID(750).to(device)
    optimizer = optim.Adam(model.parameters())

    train(model, device, train_loader, criterion, optimizer)
    test(model, device, test_loader, criterion)
    save(model)
Example #2

# Assumed imports for this fragment: os, logging, torch, torch.optim as optim,
# from time import time, plus project helpers get_criterion, train, test.
def main(net, dataloader, device, config):
    train_loader, test_loader = dataloader[0], dataloader[1]
    optimizer = optim.SGD(net.parameters(),
                          lr=config.lr,
                          momentum=config.momentum,
                          weight_decay=config.weight_decay)
    scheduler = optim.lr_scheduler.ExponentialLR(optimizer, 0.95)

    crit = get_criterion(config)
    crit = crit.to(device)

    os.makedirs(config.weight, exist_ok=True)
    checkpoint = os.path.join(config.weight, config.model + '.pth')

    best_acc = 0
    for epoch in range(config.epoch):
        logging.info(f'LR: {scheduler.get_last_lr()}')  # get_lr() is deprecated in recent PyTorch
        ########## TRAIN ##########
        start = time()
        net.train()
        loss_train = train(config, net, device, train_loader, crit, optimizer,
                           epoch)
        end = time() - start
        logging.info(
            f'=> Epoch[{epoch}], Average train Loss: {loss_train:.3f}, Tot Time: {end:.3f}'
        )

        ########## TEST ###########
        start = time()
        net.eval()
        acc = test(config, net, device, test_loader, epoch)
        end = time() - start
        logging.info(
            f'=> Epoch[{epoch}], Final accuracy: {acc:.3f}, Tot Time: {end:.3f}\n'
        )
        if acc > best_acc:
            torch.save(net.state_dict(), checkpoint)
            logging.info(f'Saving best model checkpoint at {checkpoint}')
            best_acc = acc
        scheduler.step()

    logging.info(f'Best test accuracy: {best_acc}')
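A minimal resume sketch for the best checkpoint saved above (an assumption, not
shown in the project: it presumes the same net architecture; map_location keeps
loading device-agnostic):

state = torch.load(checkpoint, map_location=device)
net.load_state_dict(state)
net.eval()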
Example #3
# Assumed imports: from os import system; from termcolor import colored; plus
# project helpers select_script, select_scene, load_script, test, grade, grade_color.
def main() -> None:
    """Run the Linebacker project end to end."""
    system("clear")

    script = select_script()
    scene = select_scene(script=script)
    reformatted_script = load_script(script=script, scene=scene)

    divider = colored("=" * 50, "blue")
    print(f"\n{divider}\n")

    n_incorrect, n_total = test(script=reformatted_script)

    print(f"\n{divider}\n")

    accuracy = grade(incorrect=n_incorrect, total=n_total)

    color = grade_color(accuracy=accuracy)
    percentage = colored(f"{accuracy}%", color)

    if accuracy < 100:
        print(f"Your memory has {percentage} accuracy. Keep practicing!")
    else:
        print(f"Your memory has {percentage} accuracy. Good job!")
Example #4
from sklearn import tree
from variables import *  # presumably supplies x, z and the *_conv lists used below
from functions import train, make_conversions, output_data, test

# train the program
name, Pclass, Sex, SibSp, Parch, Embarked, Survived = train()
make_conversions(Pclass, Sex, SibSp, Parch, Embarked)

# test the program; this receives the inputs from the test set
name, Pclass, Sex, SibSp, Parch, Embarked = test()
make_conversions(Pclass, Sex, SibSp, Parch, Embarked)

# fill x with all possible parameters for each person
for i in range(len(name)):
    z.append(pclass_conv[i])
    z.append(sex_conv[i])
    z.append(sibsp_conv[i])
    z.append(parch_conv[i])
    z.append(embarked_conv[i])
    x.append(z)
    z = []

# apply to sklearn's function
y = Survived
clf = tree.DecisionTreeClassifier()
clf = clf.fit(x, y)

# output a csv to compare results to the original training dataset
output_data(name,Pclass,Sex,SibSp,Parch,Survived,Embarked,clf)
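The star import above hides where x, z, and the *_conv lists live. A
self-contained sketch of the same decision-tree flow, assuming a Titanic-style
train.csv with the usual column names (file name and columns are illustrative):

import pandas as pd
from sklearn import tree

df = pd.read_csv("train.csv")
features = ["Pclass", "Sex", "SibSp", "Parch", "Embarked"]

# encode each categorical column as integer codes, mirroring make_conversions
X = df[features].apply(lambda col: pd.factorize(col)[0])
y = df["Survived"]

clf = tree.DecisionTreeClassifier().fit(X, y)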

Example #5
# (fragment) resume branch of a training script; the opening `if args.resume:`
# and the checkpoint load are assumed from context:
if args.resume:
    checkpoint = torch.load(args.resume)
    best_acc = checkpoint['best_acc']
    start_epoch = checkpoint['epoch']
    model.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title, resume=True)
else:
    logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
    logger.set_names(['Learning Rate', 'Train Loss', 'Valid Loss','Train Acc.', 'Valid Acc.', 'Train Acc.5',
                      'Valid Acc.5'])

# Train and validate
for epoch in range(start_epoch, args.epochs):
    state['lr'] = adjust_learning_rate(state['lr'], optimizer, epoch, args.gamma, args.schedule)
    print('\nEpoch: [%d | %d] LR: %f' % (epoch + 1, args.epochs, state['lr']))

    train_loss, train_acc, train_acc5 = train_one_epoch(trainloader, model, criterion, optimizer, use_cuda=use_cuda)
    test_loss, test_acc, test_acc5 = test(testloader, model, criterion, use_cuda=use_cuda)
    # append logger file
    logger.append([state['lr'], train_loss, test_loss, train_acc, test_acc, train_acc5, test_acc5])

    # save model checkpoint; keep a copy of the best by test accuracy
    is_best = test_acc > best_acc
    best_acc = max(test_acc, best_acc)
    if do_save_checkpoint:
        save_checkpoint({
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'acc': test_acc,
                'best_acc': best_acc,
                'optimizer' : optimizer.state_dict(),
            }, is_best, checkpoint=args.checkpoint)
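The save_checkpoint helper itself is not shown; a minimal sketch of what such a
helper typically does (the file names here are assumptions):

import os
import shutil
import torch

def save_checkpoint(state, is_best, checkpoint='checkpoint',
                    filename='checkpoint.pth.tar'):
    # always write the latest state; copy it aside when it is the best so far
    filepath = os.path.join(checkpoint, filename)
    torch.save(state, filepath)
    if is_best:
        shutil.copyfile(filepath, os.path.join(checkpoint, 'model_best.pth.tar'))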
Example #6
File: OMEN.py  Project: kumabearken/OMEN
import functions
import time

# Set True to hardcode the admin email below instead of prompting for it
AUTO = True
HARDSET = "insert email"

status = {}
# hardcoded admin email
if AUTO:
    admin = HARDSET
else:
    admin = functions.set_admin()

while True:
    status = functions.get_status()
    if not functions.test(status=status):
        print("DETECTED PROBLEM")
        time.sleep(5)
        # spin up a replacement instance, migrate to it, notify the admin,
        # then shut this instance down
        dns, key_name = functions.create_instance()
        functions.transfer(dns=dns, key_name=key_name)
        time.sleep(5)
        functions.notify(email=admin, dns=dns, key_name=key_name)
        time.sleep(5)
        functions.shutdown()
    time.sleep(5)
Example #7

# Assumed context: a resnet model, the loaders, and the project's train/test
# helpers are defined earlier in the file.
criterion = nn.CrossEntropyLoss()

optimizer = optim.Adam(resnet.parameters(), lr=learning_rate)

all_train_loss = []
all_train_acc = []
all_test_loss = []
all_test_acc = []

dynamics = []

for epoch in range(1, nb_epochs + 1):

    train(resnet, train_loader, optimizer, criterion, epoch, batch_log, device)
    # re-evaluate on the training set so the logged metrics come from fixed weights
    train_l, train_a = test(resnet, train_loader, criterion, epoch, batch_log, device)
    all_train_loss.append(train_l)
    all_train_acc.append(train_a)

    test_l, test_a = test(resnet, test_loader_sc, criterion, epoch, batch_log, device) 
    all_test_loss.append(test_l)
    all_test_acc.append(test_a)

    dynamics.append({
        "epoch": epoch,
        "train_loss": train_l,
        "train_acc": train_a,
        "test_loss": test_l,
        "test_acc": test_a
    })
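One way to persist the per-epoch dynamics collected above (a sketch; the file
name is illustrative, and it assumes the logged values are plain Python numbers):

import json

with open("resnet_dynamics.json", "w") as f:
    json.dump(dynamics, f, indent=2)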
Example #8
from sklearn import tree
from variables import *  # presumably supplies x, z and the *_conv lists used below
from functions import make_conversions, train, output_data, test

# train the program
outlook, temperaturelevel, humiditylevel, windyornot, playornot = train()
make_conversions(outlook, temperaturelevel, humiditylevel, windyornot)

# test the program; this receives the inputs from the test set
outlook, temperaturelevel, humiditylevel, windyornot = test()
make_conversions(outlook, temperaturelevel, humiditylevel, windyornot)

# fill x with all possible parameters for each day (the loop assumes 14 rows)
for j in range(14):
    z.append(outlook_conv[j])
    z.append(temperature_conv[j])
    z.append(humidity_conv[j])
    z.append(windy_conv[j])
    x.append(z)
    z = []

# apply to sklearn's function
y = playornot
clf = tree.DecisionTreeClassifier()
clf = clf.fit(x, y)

# output a csv to compare results to the original training dataset
output_data(outlook, temperaturelevel, humiditylevel, windyornot, clf,
            playornot)
Example #9

# (fragment) this snippet starts mid-statement; an enclosing trial loop and the
# opening `models = [` are assumed from context:
for ii in range(repeats):
    models = [
        kanazawa(f_in, ratio, nratio, srange),
        kanazawa_big(f_in, ratio, nratio, srange=0),
        kanazawa(f_in, ratio=2**(1 / 3), nratio=6, srange=0),
        kanazawa(f_in, ratio=2**(1 / 3), nratio=6, srange=2)
    ]

    for m, model in enumerate(models):
        print("trial {}, model {}".format(ii, m))
        model.to(device)
        model_log = open("mnist_trained_model_{}_{}_k.pickle".format(m, ii),
                         "wb")

        for epoch in range(1, nb_epochs + 1):
            train_l, train_a = train(model, train_loader, learning_rate,
                                     criterion, epoch, batch_log, device)
            train_l, train_a = test(model, train_loader, criterion, epoch,
                                    batch_log, device)
            dyn = {"epoch": epoch, "train_loss": train_l, "train_acc": train_a}
            pickle.dump(dyn, model_log)
        pickle.dump(model, model_log)
        model_log.close()

        # last test loss and accuracy at each scale for model m in trial ii
        s_test_loss = []
        s_test_acc = []
        for s in scales:
            test_transf = transforms.Compose([
                transforms.Resize(40),
                RandomRescale(size=40, scales=(s, s), sampling="uniform"),
                transforms.ToTensor(),
                transforms.Normalize((0.1307, ), (0.3081, ))
            ])
Example #10
from functions import train, test
import random
import sys
import numpy as np

# Load in files
random.seed(int(sys.argv[1]))
k = int(sys.argv[2])
training_set = np.loadtxt(sys.argv[3])
testing_set = np.loadtxt(sys.argv[4])

# initialize each centroid inside its own slice of every feature's range so the
# k starting points are spread across the data
centroids = []
for j in range(k):
    centroid = []
    for i in range(training_set.shape[1]):
        low = np.amin(training_set[:, i])
        high = np.amax(training_set[:, i])
        lower = low + ((high - low) / k) * j
        higher = low + ((high - low) / k) * (j + 1)
        centroid.append(random.uniform(lower, higher))
    centroids.append(centroid)

centroids = np.asarray(centroids, dtype=float)

# Promote to 2D arrays if an input file held a single row
if len(training_set.shape) < 2:
    training_set = np.array([training_set])
if len(testing_set.shape) < 2:
    testing_set = np.array([testing_set])

centroids = train(training_set, centroids, k)
test(testing_set, centroids, k)
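The stratified centroid initialization above can also be written in a few
vectorized lines; a sketch (note that np.random keeps its own seed, separate
from the random.seed call above):

lows = training_set.min(axis=0)
step = (training_set.max(axis=0) - lows) / k
centroids = np.array([np.random.uniform(lows + step * j, lows + step * (j + 1))
                      for j in range(k)])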
Example #11
# (fragment) continuation of Example #9's scale-evaluation loop; the enclosing
# `for s in scales:` and the transforms.Compose opener are assumed from context:
        for s in scales:
            test_transf = transforms.Compose([
                transforms.Resize(40),
                RandomRescale(size=40, scales=(s, s), sampling="uniform"),
                transforms.ToTensor(),
                transforms.Normalize((0.1307, ), (0.3081, ))
            ])
            test_set = datasets.CIFAR10(root=root,
                                        train=False,
                                        transform=test_transf,
                                        download=True)
            test_loader = DataLoader(dataset=test_set,
                                     batch_size=batch_size,
                                     shuffle=False,
                                     num_workers=1,
                                     pin_memory=True)

            test_l, test_a = test(model, test_loader, criterion, 200,
                                  batch_log, device)

            s_test_loss.append(test_l)  #take only last value
            s_test_acc.append(test_a)

        locals()['test_losses_{0}'.format(m)].append(s_test_loss)
        locals()['test_accs_{0}'.format(m)].append(s_test_acc)

for m in range(len(models)):
    pickle.dump(locals()['test_losses_{0}'.format(m)], log)
    pickle.dump(locals()['test_accs_{0}'.format(m)], log)

for m in range(len(models)):
    locals()['avg_test_loss_{0}'.format(m)] = np.mean(
        np.array(locals()['test_losses_{0}'.format(m)]), axis=0)
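Writing results through locals() is fragile (CPython does not guarantee that
assignments into locals() stick inside a function) and hard to read. The same
bookkeeping with plain dicts, as a sketch using the surrounding names:

# per-model containers, created once before the trial loop
test_losses = {m: [] for m in range(len(models))}
test_accs = {m: [] for m in range(len(models))}

# inside the loop, replacing the locals() writes:
#     test_losses[m].append(s_test_loss)
#     test_accs[m].append(s_test_acc)

for m in range(len(models)):
    pickle.dump(test_losses[m], log)
    pickle.dump(test_accs[m], log)

avg_test_loss = {m: np.mean(np.array(test_losses[m]), axis=0)
                 for m in range(len(models))}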
Example #12
criterion = nn.CrossEntropyLoss()
optimizer1 = optim.Adam(net1.parameters(), lr=learning_rate)
optimizer2 = optim.Adam(net2.parameters(), lr=learning_rate)
optimizer3 = optim.Adam(net3.parameters(), lr=learning_rate)
optimizer4 = optim.Adam(net4.parameters(), lr=learning_rate)

train_loss1 = []
train_acc1 = []
test_loss1 = []
test_acc1 = []

for epoch in range(1, nb_epochs + 1):
    train_l, train_a = train(net1, train_loader, optimizer1, criterion, epoch,
                             batch_log, device)
    train_l, train_a = test(net1, train_loader, criterion, epoch, batch_log,
                            device)
    test_l, test_a = test(net1, test_loader, criterion, epoch, batch_log,
                          device)
    train_loss1.append(train_l)
    train_acc1.append(train_a)
    test_loss1.append(test_l)
    test_acc1.append(test_a)

with open("compareCNNs_log1.txt", "w") as output:
    output.write(
        "nb_epochs=100\t learning_rate=0.00001\t batch_size=128\t ratio=2^(2/3)\t nratio=3 \n"
    )
    output.write("SiCNN1\t 3,36,3-36,64,3-64,150,10\tsrange=0,3\n")
    output.write(str(train_loss1))
    output.write(str(train_acc1))
    output.write(str(test_loss1))
Example #13
#!/usr/bin/env python3
import sys
import numpy as np
from functions import train, test

# Load in files
training_file = sys.argv[1]
testing_file = sys.argv[2]

# Promote to a 2D array if a file held a single row
train_test = [np.loadtxt(training_file), np.loadtxt(testing_file)]
for i in range(0, 2):
    if len(train_test[i].shape) < 2:
        train_test[i] = np.array([train_test[i]])

tree = []
train(train_test[0], tree, 1)
#indices = np.argsort(tree,axis=0)
#for i in indices[:,0]:
#print(tree[i])

test(train_test[1], tree)
Example #14
from pprint import pprint

import pandas as pd

dataset = pd.read_csv('car_evaluation.csv')

columns = list(dataset.columns.values)
# print(columns)
# print(set(dataset[columns[0]].tolist()))
# print(dataset.dtypes)
from functions import repair_dataset
dataset = repair_dataset(dataset, .2, 4, .4)
target_attribute_name = columns[-1]
total_data = int(len(dataset[target_attribute_name].tolist()) * .95)  # 95% of rows for training
continuous_threshold = .2
continuous_ignore = .4
continuous_partition = 4
# print(dataset)

# split data
from functions import train_test_split
training_data, testing_data = train_test_split(dataset, total_data)

# create tree
from functions import ID3
tree = ID3(training_data, training_data, training_data.columns[:-1],
           target_attribute_name)
# pprint(tree)

# test
from functions import test
test(testing_data, tree, target_attribute_name)
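For comparison, sklearn's own splitter expresses the same 95/5 split as a
fraction rather than a row count (shuffle=False mirrors a sequential split;
whether the project's train_test_split shuffles is not shown):

from sklearn.model_selection import train_test_split as sk_split

training_data, testing_data = sk_split(dataset, test_size=0.05, shuffle=False)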
Example #15
# scheduler = optim.lr_scheduler.StepLR(optimizer2, step_size=40, gamma=0.1)

optimizer3 = optim.Adam(sicnn3.parameters(), lr=learning_rate)

optimizerm = optim.Adam(mini.parameters(), lr=learning_rate)

train_loss_1 = []
train_acc_1 = []
valid_loss_1 = []
valid_acc_1 = []

for epoch in range(1, nb_epochs + 1): 
    
    train_l, train_a = train(sicnn1, train_loader, optimizer1, criterion, epoch, batch_log, device) 
    train_l, train_a = test(sicnn1, train_loader, criterion, epoch, batch_log, device) 
    valid_l, valid_a = test(sicnn1, valid_loader, criterion, epoch, batch_log, device)
    train_loss_1.append(train_l)
    train_acc_1.append(train_a) 
    valid_loss_1.append(valid_l)
    valid_acc_1.append(valid_a)

with open("mnist_sicnn1_log.txt", "w") as output:
    output.write("nb_epochs=200\t lr=0.0001 \t batch_size=256\n")
    output.write("SiCNN kanazawa \t ratio=2^(2/3), nratio=3, srange=3\n")
    output.write(str(train_loss_1))
    output.write("\n")
    output.write(str(train_acc_1))
    output.write("\n")
    output.write(str(valid_loss_1))
    output.write("\n")
Example #16

# Assumed context: stdnet and sicnn models, the loaders, and the hyperparameters
# are defined earlier in the file.
sicnn.to(device)

criterion = nn.CrossEntropyLoss()
std_optimizer = optim.Adam(stdnet.parameters(), lr=learning_rate)
sicnn_optimizer = optim.Adam(sicnn.parameters(), lr=learning_rate)

sicnn_train_loss = []
sicnn_train_acc = []
sicnn_test_loss = []
sicnn_test_acc = []

sicnn_dyn = []

for epoch in range(1, nb_epochs + 1):  
    train_l, train_a = train(sicnn, train_loader, sicnn_optimizer, criterion, epoch, batch_log, device) 
    test_l, test_a = test(sicnn, test_loader, criterion, epoch, batch_log, device)
    sicnn_train_loss.append(train_l)
    sicnn_train_acc.append(train_a)
    sicnn_test_loss.append(test_l)
    sicnn_test_acc.append(test_a)

    sicnn_dyn.append({
        "epoch": epoch,
        "train_loss": train_l,
        "train_acc": train_a,
        "test_loss": test_l,
        "test_acc": test_a
    })

std_train_loss = []
std_train_acc = []
Example #17
    # endregion

    # (fragment) main loop of a screen-capture game bot; game, capture, p_bot,
    # and the state flags are defined earlier in the file
    if run:

        game.draw_matrix()
        im2 = game.draw_image()
        cv2.imshow("frame2", im2)

        game.init_matrix()
        start = time.time()

        frame = capture.force_update()  # uh oh
        im = functions.cut(frame)

        if tried != -1:
            tried, last = functions.test(im, p_bot, game, last, mode, change,
                                         tried, random)

        random = False

        if tried >= 15:
            change = False
            if mode == "tree":
                print("> I'M SORRY, BACK TO CHOPPING")
            else:
                print("> I'M SORRY, BACK TO MINING")
            tried = 0

        functions.draw(im)
        functions.grid(im)

        cv2.imshow("frame1", im)
Example #18

# (fragment) per-trial bookkeeping; the enclosing function and an initial
# `m_t_losses = []` are assumed from context:
    m_t_losses = []
    m_t_accs = []
    m_v_losses = []
    m_v_accs = []

    for ii in range(repeats):
        model = SiCNN_3(f_in, size, ratio, nratio, srange)
        model.to(device)

        train_loss = []
        train_acc = []
        valid_loss = []
        valid_acc = []
        for epoch in range(1, nb_epochs + 1):
            train_l, train_a = train(model, train_loader, learning_rate,
                                     criterion, epoch, batch_log, device)
            train_l, train_a = test(model, train_loader, criterion, epoch,
                                    batch_log, device)
            valid_l, valid_a = test(model, valid_loader, criterion, epoch,
                                    batch_log, device)
            train_loss.append(train_l)
            train_acc.append(train_a)
            valid_loss.append(valid_l)
            valid_acc.append(valid_a)

        with open("model_{}_{}_cifar.pickle".format(s, ii), "wb") as save:
            pickle.dump(model, save)

        m_t_losses.append(train_loss)
        m_t_accs.append(train_acc)
        m_v_losses.append(valid_loss)
        m_v_accs.append(valid_acc)
Example #19
# (fragment) the opening of this parameter-count print is assumed:
print('Total params: %.2fM' %
      (sum(p.numel() for p in model.parameters()) / 1000000.0))
# set model and its secondaries
model = model.cuda()
model = torch.nn.DataParallel(model)
cudnn.benchmark = True
criterion = nn.CrossEntropyLoss()  # loss function

# ............ find results ................
epsilons = args.epsilons
targeted = args.targeted
resume_path = args.resume
file_path = args.output_file_path

loss_normal, top1_acc_normal, top5_acc_normal = test(testloader,
                                                     model,
                                                     criterion,
                                                     use_cuda,
                                                     test_flag=False)
for epsilon in epsilons:
    cw_initial_const = 0.1
    attacks = ["FGSM", "LinfBIM", "MIM", "CWL2", "LinfPGD"]

    attacks_loss, attacks_acc, attacks_acc5 = adversary_test_attacks(
        model,
        criterion,
        testloader,
        num_classes,
        attacks,
        epsilon=epsilon,
        targeted=targeted,
        cw_initial_const=0.1,
Example #20

# (fragment) resnet2 and the metric lists for models 1 and 2 are assumed to be
# defined earlier in the file.
train_loss3 = []
train_acc3 = []
valid_loss3 = []
valid_acc3 = []

train_loss4 = []
train_acc4 = []
valid_loss4 = []
valid_acc4 = []

for epoch in range(1, nb_epochs + 1):

    train_l, train_a = train(resnet2, train_loader, learning_rate, criterion,
                             epoch, batch_log, device)
    train_l, train_a = test(resnet2, train_loader, criterion, epoch, batch_log,
                            device)
    train_loss2.append(train_l)
    train_acc2.append(train_a)

    valid_l, valid_a = test(resnet2, valid_loader, criterion, epoch, batch_log,
                            device)
    valid_loss2.append(valid_l)
    valid_acc2.append(valid_a)

with open("compareResnet_log2.txt", "w") as output:
    output.write("ResNet\t size=16\t ratio=2^(-1/3)\t nratio=6 \n")
    output.write(str(train_loss2))
    output.write(str(train_acc2))
    output.write(str(valid_loss2))
    output.write(str(valid_acc2))
Example #21
criterion = nn.CrossEntropyLoss()

all_optimizer = optim.Adam(allcnn.parameters(), lr=learning_rate)
siall_optimizer = optim.Adam(siallcnn.parameters(), lr=learning_rate)

siall_train_loss = []
siall_train_acc = []
siall_test_loss = []
siall_test_acc = []
siall_test_loss_sc = []
siall_test_acc_sc = []

for epoch in range(1, nb_epochs + 1):
    train_l, train_a = train(siallcnn, train_loader, siall_optimizer,
                             criterion, epoch, batch_log, device)
    test_l, test_a = test(siallcnn, test_loader, criterion, epoch, batch_log,
                          device)
    siall_train_loss.append(train_l)
    siall_train_acc.append(train_a)
    siall_test_loss.append(test_l)
    siall_test_acc.append(test_a)
    test_l_sc, test_a_sc = test(siallcnn, test_loader_sc, criterion, epoch,
                                batch_log, device)
    siall_test_loss_sc.append(test_l_sc)
    siall_test_acc_sc.append(test_a_sc)

with open("si_allcnn_log_scaled.txt", "w") as output:
    output.write(str(siall_train_loss))
    output.write(str(siall_train_acc))
    output.write(str(siall_test_loss))
    output.write(str(siall_test_acc))
    output.write(str(siall_test_loss_sc))
Example #22
#from functions import test
#import argparse  # https://docs.python.org/3/library/argparse.html#argparse.ArgumentParser.add_argument
#import yaml  # https://rollout.io/blog/yaml-tutorial-everything-you-need-get-started/
#import pandas as pd
import functions as F
#import sklearn decisiontree

if __name__ == "__main__":
	print("test_import.py name:", __name__)
	print("functions.py name:", F.__name__)

	print("print from test_import.py")

	F.test()
	#if zscore_norm:
	#	df = zscore_norm(df)
	#parser = argparse.ArgumentParser()

	# name is how we pass it in on the command line
	# dest is the attribute it goes into
	#parser.add_argument('-path', type=int, help='an integer for param1', action="store", dest = "path", default = 5)
	#parser.add_argument('-save_path', type=str, help='an integer for param1', action="store", dest = "save_path")
	#parser.add_argument('-data_to_predict', type=str, help='an integer for param1', action="store", dest = "pred_data")
	#parser.add_argument('-normalize_flag', type=str, help='a flag for normalization', action="store", dest = "normalize_flag")

	# put the arguments in results
	#results = parser.parse_args()

	#df = pd.load(results.path)
Example #23
from functions import test

n_features = 2
n_clusters = 3


test(n_features, n_clusters)


"""
strange importError when using matplotlib.pyplot in ipynb

NameError: global name 'plt' is not defined

ImportError: cannot import name plot_cluster
-->  You have circular dependent imports


--> add an extra cell in the beginning of the notebook
%matplotlib inline
import matplotlib.pyplot as plt
plt.plot([1,2,3,4])

"""