Example #1
def _main(args):
    data_path = os.path.expanduser(args.data_path)
    classes_path = os.path.expanduser(args.classes_path)
    anchors_path = os.path.expanduser(args.anchors_path)

    data = KittiData(m=100, output_path="./data/tiny", image_data_size=(608, 608), h5path="data/tiny/KITTI.h5")
    # data = KittiData()
    num_classes = len(data.classes)
    # model_body is needed by test() below, so this call must stay active:
    model_body, model = create_model(data.image_data_size, data.anchors, data.classes,
                                     model_file="data/model_data/yolo.h5", freeze_body=False)

    # tic = time.time()
    # pl = spar_keras.get_prunable_layers(model_body)
    # cutoff = spar_keras.calc_cutoff(pl)
    # print(cutoff)
    # n_pruned = spar_keras.prune_weights(pl, cutoff)
    # print(time.time() - tic)
    # print(n_pruned)

    # tic = time.time()
    # prune_network(model_body)
    # print(time.time()-tic)

    # train_with_pruning(data, weights_file="coco_retrain_full.h5")
    train_gen(None, data, weights_file="coco_retrain_full.h5")
    test(args, model_body, data, weights_file="trained_checkpoint_best.h5")
Example #2
def main():
    # list of models to run
    model_li = [
        "batch_1_4", "batch_2_3a_SECOND_TRY", "batch_3_a", "batch_3_a_b",
        "batch_3_b", "batch_3a_4", "batch_4_plate_1", "batchfive",
        "merged_batch_1_batch_2", "model_batch_1_3a", "model_batch_1_3b",
        "model_XCL_NPC_1", "model_XCL_NPC_2"
    ]
    # list of test sets to run
    test_set_li = [
        "batch_1_3a", "batch_1_3b", "batch_1_4", "batch_2_3a", "batch_2_3b",
        "batch_3_a", "batch_3_b", "batch_3_a_b", "batch_3a_4", "batch_3b_4",
        "batch_4_plate_1", "batchfive", "XCL_NPC_batch_1", "XCL_NPC_batch_2",
        "batch1and2"
    ]

    file = "results.txt"  # text file to append results to
    csv_path = ""  # csv file to append accuracies to

    result_li = []
    for model in model_li:
        for test_set in test_set_li:
            acc, report = test(test_set, model)
            with open(file, "a") as f:
                f.write("the accuracy for model " + model + " and test set " +
                        test_set + " is " + str(acc) + "\n")
                f.write(report + "\n")
                f.write("." * 78 + "\n")
            result_li.append([model, test_set, str(acc)])

    convert_dict_to_csv(result_li, csv_path)
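The helper convert_dict_to_csv is not shown in this example. A minimal sketch of what it might look like, assuming it simply appends the accumulated [model, test_set, accuracy] rows to the CSV file (the header row is an assumption):

import csv

def convert_dict_to_csv(result_li, csv_path):
    # Hypothetical reconstruction -- the original helper is not shown.
    with open(csv_path, "a", newline="") as f:
        writer = csv.writer(f)
        writer.writerow(["model", "test_set", "accuracy"])  # assumed header
        writer.writerows(result_li)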
Example #3
def Extract_Tags(gameID):
	# folder, test_ft and test_model are module-level names in the original script.
	tags_per_game = []
	game_folder = folder + '/Game' + str(gameID)

	filename = game_folder + '/tags.txt' 
	# print(filename)
	f = open(filename, "w")

	model = 'GMM_model_' + str(gameID)


	object_files = os.listdir(game_folder)
	for objects in object_files:
		if objects.endswith('.jpg'):
			image_path = game_folder + '/' + objects
			# print(image_path)
			img = cv2.imread(image_path)
			feature_vector = test_ft.FeatureExtraction(img)
			tags = test_model.test(feature_vector, model)
			for tag in tags:
				T = tag.split('_', 1)[0]
				if T not in tags_per_game:
					tags_per_game.append(T)
					print(T)
					f.write(T + '\n')
	
	f.close()
	print(game_folder)
Example #4
def train_model(data_loader,
                model,
                criterion,
                optimizer,
                scheduler,
                start_iter,
                epochs,
                name,
                best_prec1=0,
                best_prec5=0):
    loss = []
    train_acc = []
    test_acc = []
    for epoch in range(start_iter, epochs):
        start_time = time.time()
        lossbatch, accbatch = train(data_loader['train'], model, criterion,
                                    optimizer, epoch)
        loss.append(lossbatch)
        train_acc.append(accbatch)
        prec1, prec5, _ = test(data_loader['eval'], model, criterion)
        scheduler.step()

        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        test_acc.append(best_prec1)
        best_prec5 = max(prec5, best_prec5)
        test_acc.append(best_prec5)
        end_time = time.time()
        time_all = end_time - start_time

        # parallel and stage are module-level names in the original script.
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.module.state_dict() if parallel else model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'scheduler_state_dict': scheduler.state_dict(),
                'train_result': {'losses': loss, 'train_acc': train_acc},
                'test_result': test_acc,
                'best_prec1': best_prec1,
                'best_prec5': best_prec5,
                'epoch_time': time_all,
            }, is_best, name, stage)
        print('Total time for one epoch is {}s'.format(time_all))
    print('Best accuracy@1: ', best_prec1)
    print('Best accuracy@5: ', best_prec5)
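The save_checkpoint helper is not shown in this example. A minimal sketch under the usual PyTorch convention (torch.save plus a copy of the best checkpoint); the filename scheme here is an assumption:

import shutil
import torch

def save_checkpoint(state, is_best, name, stage):
    # Hypothetical reconstruction -- the original helper is not shown.
    filename = '{}_stage{}_checkpoint.pth.tar'.format(name, stage)
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, '{}_stage{}_best.pth.tar'.format(name, stage))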
Example #5
def run_test(model_dir, data_dir, mode, beam_width=10):
    # Note: the concatenations below assume model_dir and data_dir end with
    # a path separator.
    test_dir = model_dir + 'test'
    os.makedirs(test_dir, exist_ok=True)

    log_filename = test_dir + '/test_data.log'
    checkpoint_path = model_dir + 'pkl.tar'

    create_log(log_filename)
    vocab_path = os.path.join(data_dir, 'vocab.pkl')
    data_pre = data_dir + 'test'

    with open(vocab_path, 'rb') as f:
        vocab = pickle.load(f)

    n_words = len(vocab['wtoi'])
    checkpoint = torch.load(checkpoint_path)
    hparams = checkpoint['hparams']
    hparams['beam_width'] = beam_width

    print('Building model...')
    model = hred(hparams=hparams, n_words=n_words,
                 itfloss_weights=None).cuda()
    model.load_state_dict(checkpoint['model'])

    if mode == 'test':
        print("Loading test dataset...")
        dataset = DailyDataset(hparams, data_pre, vocab)

        print('Inference utterances...')
        test(hparams, model, dataset,
             os.path.join(os.path.dirname(checkpoint_path),
                          'inf.' + os.path.basename(checkpoint_path)))
    elif mode == 'chat':
        print('Chatting with bot...')
        chat(hparams, model, vocab)
    else:
        raise ValueError('Unknown mode!')
Example #6
    # (The opening of this fragment was lost in extraction; the lines below
    # sit inside a validation loop, and the training loop that computed
    # train_loss, along with the initialization of minimum_validation_loss,
    # is missing too. A plausible reconstruction of the loop header,
    # assuming standard PyTorch names, is:)
    model.eval()
    valid_loss = 0.0
    for data, target in valid_loader:
        data, target = data.to(device), target.to(device)
        output = model(data)
        loss = criterion(output, target)
        valid_loss += loss.item() * data.size(0)

    # average loss calculations
    train_loss = train_loss / len(train_loader.sampler)
    valid_loss = valid_loss / len(valid_loader.sampler)

    # Display loss statistics
    print("epoch: {} || training loss: {} || validation loss: {} ".format(
        epoch, round(train_loss, 6), round(valid_loss, 6)))

    # Save model if loss decreases
    if valid_loss <= minimum_validation_loss:
        torch.save(
            {
                "epoch": epoch,
                "model_state_dict": model.state_dict(),
                "optimizer_state_dict": optimizer.state_dict(),
                "loss": loss,
            },
            # change filename if necessary
            "model.pt",
        )
        minimum_validation_loss = valid_loss
        print("Saving New Model")

        # test model every epoch if loss decreases
        test(model, optimizer, epoch, loss, batch_size, "val")
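For reference, the checkpoint written above can be restored with the same keys used in the torch.save call (a minimal sketch):

checkpoint = torch.load("model.pt")
model.load_state_dict(checkpoint["model_state_dict"])
optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
start_epoch = checkpoint["epoch"] + 1  # resume from the next epoch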
Example #7
from swood import SWood
# ProgBar is used below; in the original codebase it lives in a utils module
# (assumed import path):
from utils.prog_bar import ProgBar

import test_model as mod

# Tests the stochastic woodland class on the model contained within test_model.py.

# Parameters...
tree_count = 256
option_count = 4

# Get training data...
int_dm, real_dm, cats, weight = mod.generate_train()

# Train...
p = ProgBar()
sw = SWood(int_dm,
           real_dm,
           cats,
           tree_count=tree_count,
           option_count=option_count,
           weight=weight,
           callback=p.callback)
del p

print('Out-of-bag success rate = %.2f%%' % (100.0 * sw.oob_success()))
print()

# Test...
mod.test(sw.classify)
Example #8
#! /usr/bin/env python

# Copyright 2011 Tom SF Haines

# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

#   http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.


from dec_tree import DecTree
import test_model as mod


# A full decision tree test, with many categories, weights and both discrete and continuous attributes, using the model defined in test_model...


# Get training data...
int_dm, real_dm, cats, weights = mod.generate_train()


# Train...
dt = DecTree(int_dm, real_dm, cats, weights)


# Test...
mod.test(dt.classify)
Example #9
    # (The opening of this fragment was lost in extraction; the loop below
    # apparently maps integer scores to class labels, and only its final
    # branch -- scores 7-10 mapping to class 2 -- survives.)
            # 7, 8, 9, 10
            new_y.append(2)
    y = new_y
    X = dataset[:, 0:11]

    # Divide Dataset: 20% Test and 80% Train
    x_train, x_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=0.2,
                                                        random_state=5)
    # test_size=0.25 of the 80% training split leaves a 60/20/20
    # train/validation/test division overall.
    x_train_60, x_val, y_train_60, y_val = train_test_split(x_train,
                                                            y_train,
                                                            test_size=0.25,
                                                            random_state=5)

    # Search for the best hyperparameters on the validation split
    best_parameters = optimize(x_train_60, y_train_60, x_val, y_val)

    # Train with the best parameters on the full training set
    trained_model = nn_train(x_train, y_train, x_test, y_test, best_parameters)

    trained_model.save('final_model.h5')

    # trained_model = load_model('final_model.h5')

    # Evaluate the model
    evaluate(trained_model, x_train, y_train)

    # Test the model
    test(trained_model, x_test, y_test)
Example #10
# SWood and ProgBar are used below; assumed imports, matching Example #7:
from swood import SWood
from utils.prog_bar import ProgBar

import test_model as mod

# Tests the stochastic woodland class on the model contained within test_model.py, this time with some pruning.

# Parameters...
tree_count = 256
option_count = 4
minimum_size = 10

# Get training data...
int_dm, real_dm, cats, weight = mod.generate_train()

# Train...
p = ProgBar()
sw = SWood(int_dm, real_dm, cats, tree_count=tree_count, option_count=option_count,
           minimum_size=minimum_size, weight=weight, callback=p.callback)
del p

print('Out-of-bag success rate = %.2f%%' % (100.0 * sw.oob_success()))
print()

# Test...
mod.test(sw.classify)
Example #11
        # (The opening of this fragment was lost in extraction; the branch
        # above this elif presumably handled args.mode == 'benchmark-test',
        # setting images_dir, labels_dir and num_images from the test config.)
            results_dir = os.path.join(out_dir, 'benchmark-test_e%08d' % epoch)
        elif args.mode == 'benchmark-train':
            images_dir = config.train_images_dir
            labels_dir = config.train_labels_dir
            num_images = config.all_trains
            results_dir = os.path.join(out_dir,
                                       'benchmark-train_e%08d' % epoch)

        vis_per_img = int(math.ceil(num_images / 100.0))

        test_file = os.path.join(
            out_dir, 'performance-%s.csv' % images_dir.split('/')[0])
        dataset = datasets.PixelLinkIC15Dataset(
            images_dir,
            labels_dir,
            train=False,
            all_trains=num_images,
            version=config.version,
            mean=config.mean,
            image_size_test=config.image_size_test)
        test(model,
             dataset,
             epoch,
             out_dir,
             results_dir,
             test_file,
             gpu=config.gpu,
             multi_gpu=config.multi_gpu,
             vis_per_img=vis_per_img)
        # test_model()
Example #12
pt.pytesseract.tesseract_cmd = 'C:/Program Files (x86)/Tesseract-OCR/tesseract'

print('started!!!')

# Create the game object. CHAIRFED, model, train, test and
# moving_average_diff are defined elsewhere in the original script.
game = CHAIRFED()
print("game object created")

epoch = 2  # number of games played in training

# Switch to toggle between train and test.
train_mode = 0

if train_mode == 1:
    # Train the model
    hist, loss = train(game, model, epoch, verbose=1)
    print(loss)
    np.savetxt('loss_history.txt', loss)
    print("Training done")
else:
    # Test the model
    hist = test(game, model, epoch, verbose=1)
    print("Testing done")

print('finished!!!')
print(hist)
np.savetxt('win_history.txt', hist)
plt.plot(moving_average_diff(hist))
plt.ylabel('Average number of stableness per quarter')
plt.show()
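moving_average_diff is not shown in this example. Going by its name, a plausible sketch (the window size is an assumption):

import numpy as np

def moving_average_diff(hist, window=50):
    # Hypothetical reconstruction -- the original helper is not shown.
    # Smooth the history with a running mean, then difference it.
    a = np.asarray(hist, dtype=float)
    means = np.convolve(a, np.ones(window) / window, mode='valid')
    return np.diff(means)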
Example #13
    # (Fragment -- this code sits inside a function whose definition was lost
    # in extraction.)
    torch.manual_seed(123456)
    dataloaders, dataset_sizes = data_process_lisa(batch_size=128)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model_ft = Net()
    model_ft.apply(weights_init)
    #model_ft.load_state_dict(torch.load('../donemodel/'+args.model))
    model_ft.to(device)

    # model_ft = nn.DataParallel(model,device_ids=[0,1])
    # use multiple gpus

    criterion = nn.CrossEntropyLoss()

    optimizer_ft = optim.Adam(model_ft.parameters(), lr=0.01)

    # Decay LR by a factor of 0.1 every 10 epochs
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft,
                                           step_size=10,
                                           gamma=0.1)

    model_ft = pgd_train_model(model_ft,
                               criterion,
                               optimizer_ft,
                               exp_lr_scheduler,
                               num_epochs=30)
    test(model_ft, dataloaders, dataset_sizes)

    torch.save(model_ft.state_dict(),
               '../donemodel/new_linf_model050.pt')  # output model
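pgd_train_model is not defined in this example. For context, a minimal sketch of the L-infinity PGD attack (in the style of Madry et al.) that such a routine typically wraps; all hyperparameter values here are illustrative assumptions:

import torch

def pgd_attack(model, images, labels, criterion, eps=0.03, alpha=0.007, steps=10):
    # Hypothetical sketch -- not necessarily the pgd_train_model used above.
    adv = images.clone().detach() + torch.empty_like(images).uniform_(-eps, eps)
    adv = torch.clamp(adv, 0, 1).detach()                     # random start
    for _ in range(steps):
        adv.requires_grad_(True)
        loss = criterion(model(adv), labels)
        grad = torch.autograd.grad(loss, adv)[0]
        adv = adv.detach() + alpha * grad.sign()              # gradient ascent step
        adv = images + torch.clamp(adv - images, -eps, eps)   # project back to the eps-ball
        adv = torch.clamp(adv, 0, 1).detach()                 # keep valid pixel range
    return adv

An adversarial training loop would then feed pgd_attack(...) outputs to the model in place of (or alongside) the clean batches.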
Example #14
def run(param, name):
	seed = param['seed']
	torch.manual_seed(seed)
	torch.cuda.manual_seed(seed)
	np.random.seed(seed)
	
	dataset, arch, score_name, _ = name.split('_')
	information = np.load('../output/information/{}_{}.npy'.format('_'.join([dataset, arch, score_name]), param['stage']))
	information = np.mean(information, axis=0)
	if param['stage'] == 1:
		model_path = '../output/model/{}_{}.pt'.format('_'.join([dataset, arch]), 0)
		policy_arr = []
		for num_units in param['blocks'][param['arch']]:
			policy_arr.append([1]*num_units)
		if not param['special_TAG'] == '':
			name = '_'.join([
				name,
				param['special_TAG']])
			seed = seed + int(param['special_TAG'])
	else:
		if not param['special_TAG'] == '':
			name = '_'.join([
				name,
				param['special_TAG']])
			seed = seed + int(param['special_TAG'])
		model_path = '../output/model/{}_{}.pt'.format(name, param['stage']-1)
		policy_arr = np.load('../output/policy/{}_{}.npy'.format(name, param['stage']-1), allow_pickle=True)

	train_dataset = fetch_dataset(param['dataset'], split='train')
	_, valid_sampler = validate_dataset(train_dataset)
	data_loader = load_dataset(train_dataset, param['batch_size']['test'],
		param['shuffle']['test'], param['pin_memory'], param['num_workers'], sampler=valid_sampler)
	
	criterion = nn.CrossEntropyLoss()
	if not param['save_policy']:
		acc = []
		blk = []
		for re in param['hyper']:
			policy0 = deepcopy(policy_arr)
			policy, blk_comp = Policy(policy0, information, re, param)
			model = eval('models.{}.{}(dataset = \'{}\', policy = {}, model_path = \'{}\').to(device)'.format(param['model'],param['arch'], param['dataset'], policy, model_path))
			model = nn.DataParallel(model, device_ids=param['GPUs']) if param['parallel'] else model
			acc_comp, _, _ = test(data_loader, model, criterion)
			print('Experiment with hyper = {} done.'.format(re))
			acc.append(acc_comp)
			blk.append(blk_comp)
		print(acc)
		print(blk)
	else:
		policy, blk_comp = Policy(policy_arr, information, param['hyper'][0], param)
		np.save('../output/policy/{}_{}.npy'.format(name, param['stage']), policy)
		model = eval('models.{}.{}(dataset = \'{}\', policy = {}, model_path = \'{}\').to(device)'.format(param['model'],param['arch'], param['dataset'], policy, model_path))
		model = nn.DataParallel(model, device_ids=param['GPUs']) if param['parallel'] else model
		acc_comp, _, _ = test(data_loader, model, criterion)
		prune_result = {'acc': acc_comp, 'block': blk_comp}
		if not path.exists('../output/result/prune_result.pkl'):
			results = {}
		else:
			with open('../output/result/prune_result.pkl', "rb", buffering=0) as f:
				results = pickle.load(f, encoding='utf-8')
		results['{}_{}'.format(name,param['stage'])] = prune_result
		with open('../output/result/prune_result.pkl', 'wb') as f:
			pickle.dump(results, f)
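The eval-built constructor string above is fragile (it re-parses a formatted Python expression). An equivalent construction without eval, under the same assumptions about the models package layout, would be:

model_cls = getattr(getattr(models, param['model']), param['arch'])
model = model_cls(dataset=param['dataset'], policy=policy, model_path=model_path).to(device)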
Example #15
        # (The opening of this fragment was lost in extraction; it presumably
        # parsed the mode, built the model and loaded a checkpoint, much like
        # Example #5.)
        model.load_state_dict(checkpoint["model"])
    print("Model built and ready to go!")

    if mode == "train":

        print("Training model...")
        print(torch.cuda.current_device())
        run_epochs(
            hparams=hparams,
            model=model,
            dataset=dataset,
            valid_dataset=valid_dataset,
            model_pre=model_pre,
            valid_every=valid_every,
            save_every=save_every,
        )
    elif mode == "inference":
        print("Inference utterences...")
        test(
            hparams, model, dataset,
            os.path.join(os.path.dirname(checkpoint_path),
                         "inf." + os.path.basename(checkpoint_path)))

    elif mode == "chat":
        print("Chatting with bot...")
        chat(hparams, model, vocab)
    else:
        raise ValueError("Unknown mode !")

    print('Done')
Example #16
    # (The opening of this fragment was lost in extraction; the truncated
    # call below apparently trained a non-regularized logistic-regression
    # model on x_train/y_train with the listed parameters.)
        y_train,
        penalty=best_parameters["model_non_regularized"]["Penalty"],
        C=best_parameters["model_non_regularized"]["C"],
        solver=best_parameters["model_non_regularized"]["Solver"],
        multi_class=best_parameters["model_non_regularized"]["MultiClass"],
        max_iter=1000)

    lg = LogisticRegression(
        penalty=best_parameters["model_regularized"]["Penalty"],
        C=best_parameters["model_regularized"]["C"],
        solver=best_parameters["model_regularized"]["Solver"],
        multi_class=best_parameters["model_regularized"]["MultiClass"],
        max_iter=1000)
    cvs = cross_val_score(lg, x_train, y_train, cv=4)

    # Evaluate the model
    print("\n Evaluate the model \n")
    print("\nRegularized:")
    evaluate(trained_model_regularized, x_train, y_train)
    print("\n Cross_validation")
    print(cvs)
    print("\nNon Regularized:")
    evaluate(trained_model_non_regularized, x_train, y_train)

    # Test the model
    print("\n Test the model \n")
    print("\nRegularized:")
    test(trained_model_regularized, x_test, y_test)
    print("\nNon Regularized:")
    test(trained_model_non_regularized, x_test, y_test)
Example #17
# x_train, y_train, x_test, y_test and channels_weights are defined earlier
# in the original script.
train_dataset = TensorDataset(x_train, y_train)
test_dataset = TensorDataset(x_test, y_test)

# hyperparameters
torch.manual_seed(0)
lr = 0.0001
kernel_size = 5
filters = [8, 16, 32, 32]
bn = False
wn = False
wd = 0.005
batch_size = 32

# model initialization
model = UNetEx(3,
               3,
               filters=filters,
               kernel_size=kernel_size,
               batch_norm=bn,
               weight_norm=wn,
               layers=5).cuda()
optimizer = torch.optim.AdamW(model.parameters(), lr=lr, weight_decay=wd)
epochs = 1000

# model training
best_model = train_model.train(model, epochs, optimizer, train_dataset,
                               test_dataset, channels_weights, batch_size)

# model testing
test_model.test(best_model, x_test[-3:], y_test[-3:], -1)
Example #18
#! /usr/bin/env python

# Copyright 2011 Tom SF Haines

# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

#   http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

from dec_tree import DecTree
import test_model as mod

# Same as test_tree_model, but includes randomisation of attribute selection...

# Get training data...
int_dm, real_dm, cats, weights = mod.generate_train()

# Train...
dt = DecTree(int_dm, real_dm, cats, weights, rand=3)

# Test...
mod.test(dt.classify)
Example #20
def train(epoch,
          dataloader,
          my_net,
          loss,
          optimizer,
          scheduler,
          device,
          start_epoch=0,
          test_datasets=None,
          vis_per_img=10):
    # config and out_dir are module-level names in the original script.
    global trainer, callbacks_cont
    if test_datasets is None:  # avoid a mutable default argument
        test_datasets = {}

    logs = {
        'batch_size': config.batch_size,
        'num_batches': len(dataloader),
        'num_epoch': config.epoch,
        'has_val_data': False,
        'has_regularizers': False
    }
    callbacks_cont.on_train_begin(logs)

    iteration = 0
    for i_epoch in range(start_epoch, epoch):
        epoch_logs = {}
        callbacks_cont.on_epoch_begin(i_epoch, epoch_logs)
        for i_batch, sample in enumerate(dataloader):
            callbacks_cont.on_batch_begin(i_batch)
            scheduler.step(epoch=i_epoch)
            start = time.time()
            images = sample['image'].to(device)
            # print(images.shape, end=" ")
            pixel_masks = sample['pixel_mask'].to(device)
            neg_pixel_masks = sample['neg_pixel_mask'].to(device)
            link_masks = sample['link_mask'].to(device)
            pixel_pos_weights = sample['pixel_pos_weight'].to(device)

            out_cls, out_link = my_net(images)  # call the module, not .forward(), so hooks run

            total_loss, pixel_loss, link_loss = loss(out_cls, out_link,
                                                     pixel_masks, link_masks,
                                                     neg_pixel_masks,
                                                     pixel_pos_weights)
            #print("iteration %d : " % iteration)  #, end=": ")
            #print("pixel_loss: " + str(pixel_loss.tolist()))  #, end=", ")
            # print("pixel_loss_pos: " + str(pixel_loss_pos.tolist()), end=", ")
            # print("pixel_loss_neg: " + str(pixel_loss_neg.tolist()), end=", ")
            #print("link_loss: " + str(link_loss.tolist()))  #, end=", ")
            # print("link_loss_pos: " + str(link_loss_pos.tolist()), end=", ")
            # print("link_loss_neg: " + str(link_loss_neg.tolist()), end=", ")
            #print("total loss: " + str(total_loss.tolist()))  #, end=", ")

            optimizer.zero_grad()
            total_loss.backward()
            optimizer.step()

            end = time.time()
            # print("time: " + str(end - start))
            iteration += 1

            batch_logs = {'loss': total_loss.tolist()}
            callbacks_cont.on_batch_end(i_batch, batch_logs)

        if i_epoch > 0 and i_epoch % 50 == 0:
            checkpoint = {
                'epoch': i_epoch,
                'state_dict': my_net.state_dict(),
                'optimizer': optimizer.state_dict()
            }
            torch.save(
                checkpoint,
                os.path.join(out_dir, 'snapshots', 'epoch_%08d.mdl' % i_epoch))

            for benchmark_name, datasets_test in test_datasets.items():
                results_dir = os.path.join(
                    out_dir, '%s_e%08d' % (benchmark_name, i_epoch))
                test_file = os.path.join(
                    out_dir, 'performance-%s.csv' %
                    datasets_test.images_dir.split('/')[0])
                test(my_net,
                     datasets_test,
                     i_epoch,
                     out_dir,
                     results_dir,
                     test_file,
                     gpu=config.gpu,
                     multi_gpu=config.multi_gpu,
                     vis_per_img=vis_per_img,
                     weights_preloaded=True)
                my_net.train()

        epoch_logs.update(trainer.history.batch_metrics)
        callbacks_cont.on_epoch_end(i_epoch, logs=epoch_logs)