Example no. 1
0
def main():
    config = sysconfig()
    print(config.Train_set_path)
    IS_Training = True
    if IS_Training:
        train(config)
    else:
        test(config)
Example no. 2
0
def main_test(args):
    print(args)
    test(task=args.task,
         modelname=args.model,
         data_dir=args.dataset,
         dim=args.dim,
         batch=args.batch,
         lr=args.lr,
         max_epoch=args.max_epoch,
         gamma=args.gamma,
         lossname=args.loss,
         negsample_num=args.eta,
         timedisc=args.timedisc,
         cuda_able=args.cuda,
         cmin=args.cmin,
         gran=args.gran,
         count=args.thre)
Example no. 3
0
def main():
    print('front_x')
    test(front_x(['bbb', 'ccc', 'axx', 'xzz', 'xaa']),
         ['xaa', 'xzz', 'axx', 'bbb', 'ccc'])
    test(front_x(['ccc', 'bbb', 'aaa', 'xcc', 'xaa']),
         ['xaa', 'xcc', 'aaa', 'bbb', 'ccc'])
    test(front_x(['mix', 'xyz', 'apple', 'xanadu', 'aardvark']),
         ['xanadu', 'xyz', 'aardvark', 'apple', 'mix'])
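Neither front_x nor the test helper is defined in these snippets. A minimal sketch, assuming the usual test(got, expected) print-comparison convention and the ordering implied by the expected lists above:

def test(got, expected):
    # print ' OK ' when the result matches the expected value, '  X ' otherwise
    prefix = ' OK ' if got == expected else '  X '
    print('%s got: %s expected: %s' % (prefix, repr(got), repr(expected)))

def front_x(words):
    # sort the words, but put those starting with 'x' first (each group sorted alphabetically)
    x_words = sorted(w for w in words if w.startswith('x'))
    others = sorted(w for w in words if not w.startswith('x'))
    return x_words + others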
Example no. 4
0
def main():
  print('front_x')
  test(front_x(['bbb', 'ccc', 'axx', 'xzz', 'xaa']),
       ['xaa', 'xzz', 'axx', 'bbb', 'ccc'])
  test(front_x(['ccc', 'bbb', 'aaa', 'xcc', 'xaa']),
       ['xaa', 'xcc', 'aaa', 'bbb', 'ccc'])
  test(front_x(['mix', 'xyz', 'apple', 'xanadu', 'aardvark']),
       ['xanadu', 'xyz', 'aardvark', 'apple', 'mix'])
Example no. 5
0
def main():
    test(desglosar(20), [(1, 20)])
    test(desglosar(40), [(2, 20)])
    test(desglosar(434), [(2, 200), (1, 20), (1, 10), (2, 2)])

    imprimirDesglose(desglosar(434))
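desglosar and imprimirDesglose are not defined in the snippet above. A minimal sketch of desglosar, consistent with the expected outputs and assuming a greedy breakdown over the denominations 500, 200, 100, 50, 20, 10, 5, 2, 1:

def desglosar(cantidad):
    # greedy breakdown of an amount into (count, denomination) pairs, largest denomination first
    denominaciones = [500, 200, 100, 50, 20, 10, 5, 2, 1]
    resultado = []
    for d in denominaciones:
        if cantidad >= d:
            resultado.append((cantidad // d, d))
            cantidad %= d
    return resultado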
Example no. 6
0
def main():
    print('unicos')
    test(unicos([]), [])
    test(unicos([1, 3, 1, 3]), [1, 3])
    test(unicos([1, 3, 5, 6, 6, 5, 3, 1, 9711]), [1, 3, 5, 6, 9711])
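unicos is likewise not shown; a minimal sketch consistent with the expected outputs (drop duplicates while keeping first-occurrence order):

def unicos(valores):
    # keep only the first occurrence of each element, preserving order
    resultado = []
    for v in valores:
        if v not in resultado:
            resultado.append(v)
    return resultado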
Example no. 7
0
def pretrain(args, train_data_loader, validate_data_loader, network,
             model_save_path):
    # build a loss function
    loss_function = nn.CrossEntropyLoss()
    # build an optimizer
    optimizer = SGD(params=network.parameters(),
                    lr=args.lr,
                    weight_decay=args.wd,
                    momentum=args.mo,
                    nesterov=True)
    # build a scheduler
    scheduler = MultiStepLR(optimizer, args.point, args.gamma)

    training_loss_list = []
    training_accuracy_list = []
    validating_accuracy_list = []
    best_validating_accuracy = 0

    for epoch in range(1, args.n_training_epochs + 1):
        # init training loss and training accuracy in this epoch
        training_loss = 0
        training_accuracy = 0
        # build a bar
        if not args.flag_no_bar:
            total = len(train_data_loader)
            bar = tqdm(total=total, desc='epoch %d' % (epoch), unit='batch')

        network.train()
        for batch_index, batch in enumerate(train_data_loader):
            images, labels = batch
            images = images.float().cuda(args.devices[0])
            labels = labels.long().cuda(args.devices[0])

            logits = network.forward(images)
            loss_value = loss_function(logits, labels)

            optimizer.zero_grad()
            loss_value.backward()
            optimizer.step()

            prediction = torch.argmax(logits, dim=1)
            training_loss += loss_value.cpu().item() * images.size()[0]
            training_accuracy += torch.sum(
                (prediction == labels).float()).cpu().item()

            if not args.flag_no_bar:
                bar.update(1)

        # get average training loss and average training accuracy
        training_loss /= len(train_data_loader.dataset)
        training_loss_list.append(training_loss)
        training_accuracy /= len(train_data_loader.dataset)
        training_accuracy_list.append(training_accuracy)
        # get validating accuracy
        validating_accuracy = test(args,
                                   validate_data_loader,
                                   network,
                                   description='validating')
        validating_accuracy_list.append(validating_accuracy)

        if not args.flag_no_bar:
            bar.close()
        # output after each epoch
        print(
            'epoch %d finish: training_loss = %f, training_accuracy = %f, validating_accuracy = %f'
            % (epoch, training_loss, training_accuracy, validating_accuracy))

        # if we find a better model
        if not args.flag_debug:
            if validating_accuracy > best_validating_accuracy:
                best_validating_accuracy = validating_accuracy
                record = {
                    'state_dict': network.state_dict(),
                    'validating_accuracy': validating_accuracy,
                    'epoch': epoch
                }
                torch.save(record, model_save_path)

        # adjust learning rate
        scheduler.step()

    return training_loss_list, training_accuracy_list, validating_accuracy_list
Example no. 8
0
def train_stage2(args, train_data_loader, validate_data_loader, teacher,
                 student, model_save_path2):
    print('===== training stage 2 =====')
    # build a loss function
    training_loss_function = nn.CrossEntropyLoss()
    teaching_loss_function = nn.KLDivLoss(reduction='batchmean')
    # build an optimizer
    optimizer2 = SGD([{
        'params': student.get_network_params(),
        'lr': 0.1 * args.lr2
    }, {
        'params': student.get_classifier_params(),
        'lr': args.lr2
    }],
                     weight_decay=args.wd,
                     momentum=args.mo,
                     nesterov=True)
    # build a scheduler
    scheduler2 = MultiStepLR(optimizer2, args.point, args.gamma)

    # get number of classes and number of embedding dimensions
    n_classes = train_data_loader.dataset.get_n_classes()
    n_teacher_dimension = teacher.fc.in_features
    n_student_dimension = student.fc.in_features

    # get global class centers with teacher model
    global_class_center_file_path = 'saves/class_centers/' + \
        '_data=' + str(args.data_name) + \
        '_teacher=' + str(args.teacher_network_name) + \
        '.center'
    if os.path.exists(global_class_center_file_path):
        class_center = torch.load(global_class_center_file_path)
        class_center = class_center.cuda(args.devices[0])
    else:
        class_center = torch.zeros(
            (n_classes, n_teacher_dimension)).cuda(args.devices[0])
        class_count = torch.zeros(n_classes).cuda(args.devices[0])
        for batch_index, batch in enumerate(train_data_loader):
            images, labels = batch
            images = images.float().cuda(args.devices[0])
            labels = labels.long().cuda(args.devices[0])

            with torch.no_grad():
                embedding = teacher.forward(images, flag_embedding=True)
                for i in range(0, n_classes):
                    index_of_class_i = (labels == i)
                    class_center[i] += torch.sum(embedding[index_of_class_i],
                                                 dim=0)
                    class_count[i] += index_of_class_i.sum()  # number of class-i samples in this batch
        class_count = class_count.unsqueeze(1)
        class_center = class_center / class_count
        class_center = F.normalize(class_center, p=2, dim=1)
        torch.save(class_center, global_class_center_file_path)
    print('===== global class centers ready. =====')

    training_loss_list2 = []
    teaching_loss_list2 = []
    training_accuracy_list2 = []
    validating_accuracy_list2 = []
    best_validating_accuracy = 0

    for epoch in range(1, args.n_training_epochs2 + 1):
        # init training loss, teaching loss, and training accuracy in this epoch
        training_loss = 0
        teaching_loss = 0
        training_accuracy = 0
        # build a bar
        if not args.flag_no_bar:
            total = len(train_data_loader)
            bar = tqdm(total=total,
                       desc='stage2: epoch %d' % (epoch),
                       unit='batch')

        student.train()
        for batch_index, batch in enumerate(train_data_loader):
            images, labels = batch
            images = images.float().cuda(args.devices[0])
            labels = labels.long().cuda(args.devices[0])

            # compute student logits and training loss
            student_logits, student_embedding = student.forward(images,
                                                                flag_both=True)
            training_loss_value = training_loss_function(
                student_logits, labels)

            # get local classes and their class centers
            label_table = torch.arange(n_classes).long().unsqueeze(1).cuda(
                args.devices[0])
            class_in_batch = (labels == label_table).any(dim=1)
            class_center_in_batch = class_center[class_in_batch]

            # compute teacher logits and teaching loss
            with torch.no_grad():
                teacher_embedding = teacher.forward(images,
                                                    flag_embedding=True)
                teacher_logits = torch.mm(teacher_embedding,
                                          class_center_in_batch.t())

            # KL divergence between the softened student and teacher distributions
            # over the classes present in this batch
            teaching_loss_value = args.lambd * teaching_loss_function(
                F.log_softmax(student_logits[:, class_in_batch] / args.tau2,
                              dim=1),
                F.softmax(teacher_logits / args.tau2, dim=1))

            loss_value = training_loss_value + teaching_loss_value

            optimizer2.zero_grad()
            loss_value.backward()
            optimizer2.step()

            prediction = torch.argmax(student_logits, dim=1)
            training_loss += training_loss_value.cpu().item() * images.size(0)
            teaching_loss += teaching_loss_value.cpu().item() * images.size(0)
            training_accuracy += torch.sum(
                (prediction == labels).float()).cpu().item()

            if not args.flag_no_bar:
                bar.update(1)

        # get average training loss, average teaching loss, and average training accuracy
        training_loss /= len(train_data_loader.dataset)
        training_loss_list2.append(training_loss)
        teaching_loss /= len(train_data_loader.dataset)
        teaching_loss_list2.append(teaching_loss)
        training_accuracy /= len(train_data_loader.dataset)
        training_accuracy_list2.append(training_accuracy)
        # get validating accuracy
        validating_accuracy = test(args,
                                   validate_data_loader,
                                   student,
                                   description='validating')
        validating_accuracy_list2.append(validating_accuracy)

        if not args.flag_no_bar:
            bar.close()
        # output after each epoch
        print(
            'epoch %d finish: training_loss = %f, teaching_loss = %f, training_accuracy = %f, validating_accuracy = %f'
            % (epoch, training_loss, teaching_loss, training_accuracy,
               validating_accuracy))

        # if we find a better model
        if not args.flag_debug:
            if validating_accuracy > best_validating_accuracy:
                best_validating_accuracy = validating_accuracy
                record = {
                    'state_dict': student.state_dict(),
                    'validating_accuracy': validating_accuracy,
                    'epoch': epoch
                }
                torch.save(record, model_save_path2)

        # adjust learning rate
        scheduler2.step()

    return training_loss_list2, teaching_loss_list2, training_accuracy_list2, validating_accuracy_list2
Example no. 9
0
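# Fill the validation memory with transitions collected by a random policy; test() later
# uses it to estimate the average Q-value over this fixed set of states.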
    if done:
        state, done = env.reset(), False
    next_state, _, done = env.step(np.random.randint(0, action_space))
    val_mem.append(state, None, None, done)
    state = next_state
    T += 1
print("=====================================================")

if args.evaluate:
    dqn.eval()  # Set DQN (online network) to evaluation mode
    avg_reward, avg_Q = test(args,
                             0,
                             dqn,
                             val_mem,
                             metrics,
                             results_dir,
                             evaluate=True,
                             SIZE=SIZE,
                             NUM_TRAP=NUM_TRAP,
                             traps=traps,
                             goal=goal,
                             start=start)  # Test
    print('Avg. reward: ' + str(avg_reward) + ' | Avg. Q: ' + str(avg_Q))
else:
    # Training loop
    dqn.train()
    T, done = 0, True
    for T in trange(1, args.T_max + 1):
        if done:
            state, done = env.reset(), False

        if T % args.replay_frequency == 0:
Example no. 10
0
import matplotlib.pyplot as plt
import numpy as np

from Test import test
percentuali = list()
nodi = [10, 20, 30, 40, 50, 60]
percentuali = np.arange(0.0, 0.2,
                        0.01)  # vector of edge-creation probabilities: 0.00, 0.01, ..., 0.19
t = test(nodi, percentuali)

# Create the plots

plt.figure(1)
legenda = []
for i in range(len(t[0])):
    plt.plot(percentuali, t[0][i])
    stringa = str(nodi[i]) + " nodi nel grafo"
    legenda.append(stringa)
plt.xlabel("Proababilità di creazione degli archi")
plt.ylabel("Tempo impiegato (s)")
plt.legend(legenda)

plt.figure(2)
legenda = []
for i in range(len(t[1])):
    plt.plot(percentuali, t[1][i])
    stringa = str(nodi[i]) + " nodi nel grafo"
    legenda.append(stringa)
plt.xlabel("Probabilità di creazione degli archi")
plt.ylabel("Numero di componenti connesse")
plt.legend(legenda)
Example no. 11
0
    elif youchoice == '2':
        print("You choose testing the model.\n")
        print("Now input the model you need and the test data. You sure?\n ")
        ##测试模型准确率*********************************************************************************************************************
        model = input("model: \n")
        testdata = input("test file: \n ")
        try:
            print("Running!Please wait and donnot do anything!\n")
            #####################输入需要使用的模型参数的文件位置和名
            net.load_state_dict(torch.load(model))

            # Path and name of the test-set file
            dataset = MyDataSet(testdata)
            data_loader = DataLoader(dataset, batch_size=batch_size, shuffle=False)
            test_correct = test(net, data_loader)
            print("Accuracy:",test_correct*100,"%","\n\n")
        except:
            print("Error:  Donnot Get the file,maybe you input the wrong file name.\n\n")
        else:
            print("Testing Done!\n\n")


    elif youchoice == '3':
        print("You choose catching the model")
        print("Name your pcap")
        pcap = input("pcap: \n")
        print("How many packet you want?")
        paket_num = input("The number of the packet \n")
        print("Running!Please wait and donnot do anything!")
        source = sniff(iface = conf.iface, count = int(paket_num))
Example no. 12
0
    training_loss_list, training_accuracy_list, validating_accuracy_list = \
        pretrain(args, train_data_loader, validate_data_loader, network, model_save_path)
    record = {
        'training_loss': training_loss_list,
        'training_accuracy': training_accuracy_list,
        'validating_accuracy': validating_accuracy_list
    }

    # create stats directories
    dirs = os.path.dirname(statistics_save_path)
    os.makedirs(dirs, exist_ok=True)
    if args.n_training_epochs > 0 and (not args.flag_debug):
        torch.save(record, statistics_save_path)
    print('===== pretraining finish. =====')

    # load best model
    if not args.flag_debug:
        record = torch.load(model_save_path)
        best_validating_accuracy = record['validating_accuracy']
        network.load_state_dict(record['state_dict'])
        print('===== best model loaded, validating acc = %f. =====' %
              (record['validating_accuracy']))

    # model testing
    testing_accuracy = test(args,
                            test_data_loader,
                            network,
                            description='testing')
    print('===== testing finished, testing acc = %f. =====' %
          (testing_accuracy))
Example no. 13
0
def main():
    print('unicos')
    test(unicos([]), [])
    test(unicos([1, 3, 1, 3]), [1, 3])
    test(unicos([1, 3, 5, 6, 6, 5, 3, 1, 9711]), [1, 3, 5, 6, 9711])
Example no. 14
0
def main():
    print('match_ends')
    test(match_ends(['aba', 'xyz', 'aa', 'x', 'bbb']), 3)
    test(match_ends(['', 'x', 'xy', 'xyx', 'xx']), 2)
    test(match_ends(['aaa', 'be', 'abc', 'hello']), 1)
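match_ends is not defined in the snippet; a minimal sketch consistent with the expected counts (strings of length 2 or more whose first and last characters match):

def match_ends(words):
    # count the strings with len >= 2 whose first and last characters are equal
    return sum(1 for w in words if len(w) >= 2 and w[0] == w[-1])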
Example no. 15
0
# first try
parser.add_argument('--firstTry', type=int, default=250)

# if input_type == 1:
#     choosed_information = 'snode index'
# if input_type == 2:
#     choosed_information = 'snode resource'
# if input_type == 3:
#     choosed_information = 'snode resource and neighbour link resource'

args = parser.parse_args()

if __name__ == '__main__':
    '''
        Full test parameters:  fast_test=False, max_request_num=500, first_try=250
        Quick smoke test:      fast_test=True,  max_request_num=30,  first_try=30
    '''
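    # A quick smoke-test run (see the note above) could be launched roughly as follows;
    # the flag spellings other than --firstTry are assumptions inferred from the args used below:
    #   python <this_script>.py --requestNum 30 --firstTry 30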

    test(data_path=args.dataPath,
         batch_size=args.batchSize,
         iteration_num=args.iteration,
         max_request_num=args.requestNum,
         load_model_path=args.loadModelPath,
         save_model_path=args.saveModelPath,
         device=args.device,
         log_path=args.logPath,
         dropout=args.dropout,
         input_type=args.inputType,
         first_try=args.firstTry)
Example no. 16
0
    show_time.append(t)

def init_drivers(testTime):
    # draw from U(0, 2); a value >= 1 adds one driver, and the leftover fraction
    # (or a value < 1) adds another driver when it exceeds basicFrequency
    for j in range(testTime):
        Probability = random.uniform(0, 2.0)
        if Probability >= 1:
            addDriver(j)
            Probability -= 1
            if Probability >= basicFrequency:
                addDriver(j)
        elif Probability >= basicFrequency:
            addDriver(j)

for i in range(testTimes):
    #PoissonCoef = basicFrequency + perTime * i
    tmpTest = test(road_type)
    drivers_list = []
    show_time = []
    init_drivers(tmpTest.testTime)
    f1.write("Road : "
        + str({"maxv": road_type.Vmax, "length": road_type.numberOfPiece, "Time": tmpTest.testTime, "Frequency": basicFrequency})
        + "\n")
    f2.write("Road : "
        + str({"length": road_type.numberOfPiece * basicLength, "Time": tmpTest.testTime, "Frequency": basicFrequency})
        + "\n")
    for j in range(type_len):
        runTest(tmpTest, j)
    f1.write("\n")
    print("Finish " + str(i + 1))

f2.close()
Example no. 17
0
    training_loss_list2, teaching_loss_list2, training_accuracy_list2, validating_accuracy_list2 = \
        train_stage2(args, train_data_loader, validate_data_loader, teacher, student, model_save_path2)
    record = {
        'training_loss2': training_loss_list2,
        'teaching_loss2': teaching_loss_list2,
        'training_accuracy2': training_accuracy_list2,
        'validating_accuracy2': validating_accuracy_list2
    }

    # create stats directories
    dirs = os.path.dirname(statistics_save_path2)
    os.makedirs(dirs, exist_ok=True)
    if args.n_training_epochs2 > 0 and (not args.flag_debug):
        torch.save(record, statistics_save_path2)
    print('===== training stage 2 finish. =====')

    # load best model found in stage 2
    if not args.flag_debug:
        record = torch.load(model_save_path2)
        best_validating_accuracy = record['validating_accuracy']
        student.load_state_dict(record['state_dict'])
        print(
            '===== best model in stage 2 loaded, validating acc = %f. =====' %
            (record['validating_accuracy']))

    # model testing
    testing_accuracy = test(args,
                            test_data_loader,
                            student,
                            description='testing')
    print('===== testing finished, testing acc = %f. =====' %
          (testing_accuracy))
Example no. 18
0
                                           write_grads=True,
                                           write_graph=True,
                                           write_images=True)

history = model.fit(x_train,
                    y_train,
                    epochs=2500,
                    validation_split=0.1,
                    callbacks=[reduce_lr, check_point, tensor_board],
                    verbose=2,
                    batch_size=2000,
                    shuffle=True,
                    class_weight='auto')
# Save the model from the last training run
model.save('model.h5')
test()
"""
model.save('model.h5')
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch

plt.figure()
plt.xlabel('Epoch')
plt.ylabel('MAE(Metric)')
plt.plot(hist['epoch'], hist['mean_absolute_error'], label='train_MAE(Metric)')
plt.plot(hist['epoch'], hist['val_mean_absolute_error'], label='val_MAE(Metric)')
plt.legend()
plt.savefig('Loss.png')
plt.show()

plt.figure()
Example no. 19
0
from Test import test

# PROGRAM EXECUTION

n = int(input('Inserisci la dimensione del vettore da ordinare : '))  # input() returns a str in Python 3; cast to int

test(n)


Example no. 20
0
Mikatae = loadMikatae()

# Mikatae and Cerevisiae are now prepared for use as datasets. Cerevisiae has one more column than Mikatae,
# which represents essentiality.

estimator = Perceptron(max_iter=10)

# test is a function that iterates over the training-set size (taken from Cerevisiae) and over the number of essential
# and non-essential elements it contains, running several prediction tests; it stops when it finds one with at least
# the desired precision and returns the algorithm fitted on that training set.
# It may not find a training set with such precision.
# Once the best estimator is found, it is used to predict the essentiality of the Mikatae dataset.

# return values: estimator, max, maxsize, maxessN, maxAccuracy, bestMatrix
estimator, prec, trainSize, essElementsInTrain, accuracy, confusionMatrix, \
    X_train_best, y_train_best, X_test_best, y_test_best, y_pred_best, plt = \
    test(estimator, Cerevisiae, 50, 449, 10, 450, 2500, 10)
if estimator != 0:
    print("L'algoritmo è stato allenato su un train set di dimensione ",
          end="")
    print(trainSize, end="")
    print(" e con un numero di elementi essenziali pari a ", end="")
    print(essElementsInTrain)
    print("Precisione = ", end="")
    print(prec)
    print("Accuratezza = ", end="")
    print(accuracy)
    print("Matrice confusionale = ")
    print(confusionMatrix)
    print(
        "L'estimatore è stato usato poi per predirre l'essenzialità del dataset Mikatae, ottenendo su 4500 elementi, i seguenti risultati:"
    )
def main():
    print('match_ends')
    test(match_ends(['aba', 'xyz', 'aa', 'x', 'bbb']), 3)
    test(match_ends(['', 'x', 'xy', 'xyx', 'xx']), 2)
    test(match_ends(['aaa', 'be', 'abc', 'hello']), 1)