Example #1
def run(args):
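    # engine is assumed to be imported at module level (compare Example #2, which imports it inside run)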

    # pick out the action.
    if args['action'] == 'train':
        engine.train(args)

    else:
        engine.test(args)
Example #2
def run(args):
    import engine

    # pick out the action.
    if args['action'] == 'train':
        engine.train(args)
    elif args['action'] == 'continue':
        engine.cont(args)
    else:
        engine.test(args)
Example #3
def main(args):
    utils.init_distributed_mode(args)
    print(args)
    device = torch.device(args.device)
    # Data loading code
    print("Loading data")
    dataset_test, num_classes = get_dataset(args.dataset, 'test',
                                            args.root_path, args.use_channel)
    print("Creating data loaders")
    if args.distributed:
        sampler_test = torch.utils.data.distributed.DistributedSampler(
            dataset_test, shuffle=False)
    else:
        sampler_test = torch.utils.data.SequentialSampler(dataset_test)
    dataloader_test = torch.utils.data.DataLoader(dataset_test,
                                                  batch_size=1,
                                                  sampler=sampler_test,
                                                  num_workers=args.workers)
    print("Creating model")
    # Mask R-CNN with a ResNet-50 FPN backbone
    model = maskrcnn_resnet50_fpn(num_classes=num_classes,
                                  pretrained=args.pretrained)
    # IoU threshold used for NMS between predicted boxes (default: 0.5)
    model.roi_heads.nms_thresh = 0.3
    # maximum number of detections kept per image
    model.roi_heads.detections_per_img = 1000
    # minimum score for a detection to be kept (default: 0.05)
    model.roi_heads.score_thresh = 0.05
    model.to(device)
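    # keep a reference to the bare model so the checkpoint can be loaded even when DDP wraps it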
    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.gpu])
        model_without_ddp = model.module
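    # restore the trained weights from the checkpoint passed via args.resume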
    checkpoint = torch.load(args.resume, map_location='cpu')
    model_without_ddp.load_state_dict(checkpoint['model'])

    test(model,
         dataloader_test,
         device,
         is_vis=args.vis,
         draw_bbox=False,
         vis_dir=args.vis_dir)
Example #4
def index(request):
    
    url = ''
    platforms = []
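    # a GET with ?url=... runs engine.test on the submitted URL; otherwise render an empty form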
    if 'url' in request.GET:
        form = SiteForm(data=request.GET)
        if form.is_valid():
            platforms = engine.test(form.cleaned_data['url'])
            
    else:
        form = SiteForm()
    
    return render_to_response('index.html', 
        {'form':form,
         'platforms':platforms,
         'media_root': settings.STATIC_ROOT
        },
        context_instance=RequestContext(request))
Example #5
def index(request):
    
    url = ''
    site = None
    
    if 'url' in request.GET:
        form = SiteForm(data=request.GET)
        if form.is_valid():
            site = engine.test(form.cleaned_data['url'], 'force' in request.GET)
    else:
        form = SiteForm()
    
    return render_to_response('index.html', 
        {'form':form,
         'site':site,
         'media_root': settings.STATIC_ROOT,
         'next': request.get_full_path()
        },
        context_instance=RequestContext(request))
Example #6
    optimizer = optim.SGD(params_cnn_fnn,
                          lr=args.lr,
                          momentum=0.6,
                          weight_decay=0.0005)

    lr_scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer,
                                                     base_lr=1e-3,
                                                     max_lr=6e-3)
    # lr_scheduler = CosineAnnealingLR(optimizer, 50, eta_min=1e-3, last_epoch=-1)

    model_save_path = args.save_final_model_path

    print("Start training")
    start_time = time.time()
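    # train each epoch, step the cyclic LR schedule, and evaluate every 10 epochs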
    for epoch in range(args.epochs):
        print("Executing Epoch: " + str(epoch))
        engine.train(model_cnn,
                     model_fnn,
                     optimizer,
                     train_loader,
                     device,
                     epoch,
                     save=model_save_path)
        lr_scheduler.step()
        if epoch % 10 == 0:
            print("Testing Epoch: " + str(epoch))
            engine.test(model_cnn, model_fnn, test_loader, device, epoch)

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
Example #7
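# each entry pairs a player command with the full game output the engine is expected to produce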
test([
    [ 'get chain', 'You cannot get that.\n'],
    [ 'get egg', 'You cannot get that.\n' ],
    [ 'go west', '''You are in a beautiful garden - there is a well in front of you.
There is a door going east from here.
There is a gate going outside from here.
You see a frog on the floor.
You see a chain on the floor.
'''],
    [ 'unlock gate', 'Unlock gate how?\n'],
    [ 'unlock gate with key', 'You don\'t have the key.\n'],
    [ 'get chain', 'You are now carrying the chain.\n'],
    [ 'drop bucket', 'You don\'t have the bucket.\n'],
    [ 'drop chain', 'You dropped the chain in the garden.\n'],
    [ 'get chain', 'You are now carrying the chain.\n'],
    [ 'go east', '''You are in the bedroom of a wizards house - there is a wizard snoring loudly on the bed.
There is a door going west from here.
There is a stairway going upstairs from here.
You see a bucket on the floor.
You see a bottle on the floor.
'''],
    [ 'get bucket', 'You are now carrying the bucket.\n'],
    [ 'go upstairs', '''You are in the attic of the wizards house - there is a giant welding torch in the corner.
There is a stairway going downstairs from here.
'''],
    [ 'weld chain to bucket', 'The chain is now securely welded to the bucket.\n'],
    [ 'go downstairs', '''You are in the bedroom of a wizards house - there is a wizard snoring loudly on the bed.
There is a door going west from here.
There is a stairway going upstairs from here.
You see a bottle on the floor.
'''],
    [ 'go west', '''You are in a beautiful garden - there is a well in front of you.
There is a door going east from here.
There is a gate going outside from here.
You see a frog on the floor.
'''],
    [ 'dunk bucket in well', 'The bucket is now full of water.\n'],
    [ 'go east', '''You are in the bedroom of a wizards house - there is a wizard snoring loudly on the bed.
There is a door going west from here.
There is a stairway going upstairs from here.
You see a bottle on the floor.
'''],
    [ 'splash wizard with bucket', '''The wizard awakens from his slumber.
He greets you warmly and drops a key. But he is confused why you woke him. What now?
'''],
    [ 'splash wizard with bucket', 'You have already done that.\n' ],
    [ 'get key', 'You are now carrying the key.\n'],
    [ 'go west', '''You are in a beautiful garden - there is a well in front of you.
There is a door going east from here.
There is a gate going outside from here.
You see a frog on the floor.
'''],
    [ 'break gate', 'I don\'t know how to break gate.\n'],
    [ 'unlock gate', 'Unlock gate how?\n'],
    [ 'unlock gate with key', 'You unlock the gate! It\'s very dark outside...\n'],
    [ 'go outside', '''You are in the middle of the deep woods. only a few rays of sunlight reach the ground from here. The porch of the wizard\'s house is to your right.
There is a gate going inside from here.
You see a cake on the floor.
'''],
    [ 'go', 'Go what?\n'],
    [ 'get cake', '''You are now carrying the cake.

YES! CAKE! YOU WIN THE GAME 100000000 TIMES!!!
''']]
)
Example #8
EPOCHS = 10
BATCH_SIZE = 32
LR = 1e-3
MAX_LOSS = 9999

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.1307, ), (0.3081, ))])

train_set, test_set = dataset.create_dataset(transform)

train_loader = torch.utils.data.DataLoader(train_set,
                                           batch_size=BATCH_SIZE,
                                           shuffle=True)
test_loader = torch.utils.data.DataLoader(test_set,
                                          batch_size=BATCH_SIZE,
                                          shuffle=False)

model = model.Net().to(device)

optimizer = optim.Adam(model.parameters(), lr=LR)
scheduler = StepLR(optimizer, step_size=1)

# training loop: one pass over the training data and one evaluation per epoch
for epoch in range(1, EPOCHS + 1):
    train_loss = engine.train(model, device, train_loader, optimizer, epoch)
    test_loss = engine.test(model, device, test_loader)
    scheduler.step()
    if test_loss < MAX_LOSS:
        MAX_LOSS = test_loss  # track the best test loss so only improved models are checkpointed
        torch.save(model.state_dict(), "mnist_cnn.pt")
Example #9
                device=DEVICE,
                batch_size=BATCH_SIZE
        )

    if TEST:
        net = restore_net(PATH_TO_TEST_MODEL)
        net.eval()
        net.to(DEVICE)

        # after the training run function for train/val/test loader
        loader = test_loader

        ecgs, y_true, y_pred = test(
                net=net,
                test_loader=loader,
                device=DEVICE,
                batch_size=BATCH_SIZE,
                plot_ecg=False,
                plot_ecg_windows_size=WINDOWS_SIZE
        )

        # summarise the collected predictions: metrics, confusion matrix and optional ECG plots
        eval(
            ecgs=ecgs,
            y_true=y_true,
            y_pred=y_pred,
            labels=[0, 1, 2, 3, 4],
            target_names=['none', 'p_wave', 'qrs', 't_wave', 'extrasystole'],
            plot_acc=True,
            plot_loss=True,
            plot_conf_matrix=True,
            plot_ecg=True,
            plot_ecg_windows_size=WINDOWS_SIZE
        )
Example #10
def run():
    Seed = 1234
    random.seed(Seed)
    np.random.seed(Seed)
    torch.manual_seed(Seed)
    torch.cuda.manual_seed(Seed)
    torch.backends.cudnn.deterministic = True
    train, valid, test, SRC, TRG = dataset.create_dataset()
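    # BucketIterator groups examples of similar source length to minimise padding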
    train_iterator, valid_iterator, test_iterator = BucketIterator.splits(
        (train, valid, test),
        sort_key=lambda x: len(x.source),
        batch_size=config.BATCH_SIZE,
        device=config.device)

    INPUT_DIM = len(SRC.vocab)
    OUTPUT_DIM = len(TRG.vocab)
    HID_DIM = 256
    ENC_LAYERS = 3
    DEC_LAYERS = 3
    ENC_HEADS = 8
    DEC_HEADS = 8
    ENC_PF_DIM = 512
    DEC_PF_DIM = 512
    ENC_DROPOUT = 0.1
    DEC_DROPOUT = 0.1

    enc = model.Encoder(INPUT_DIM, HID_DIM, ENC_LAYERS, ENC_HEADS, ENC_PF_DIM,
                        ENC_DROPOUT, config.device)

    dec = model.Decoder(OUTPUT_DIM, HID_DIM, DEC_LAYERS, DEC_HEADS, DEC_PF_DIM,
                        DEC_DROPOUT, config.device)

    SRC_PAD_IDX = SRC.vocab.stoi[SRC.pad_token]
    TRG_PAD_IDX = TRG.vocab.stoi[TRG.pad_token]

    model_tr = model.Seq2Seq(enc, dec, SRC_PAD_IDX, TRG_PAD_IDX,
                             config.device).to(config.device)

    def initialize_weights(m):
        if hasattr(m, 'weight') and m.weight.dim() > 1:
            nn.init.xavier_uniform_(m.weight.data)

    model_tr.apply(initialize_weights)

    optimizer = optim.Adam(model_tr.parameters(), lr=config.LEARNING_RATE)

    criterion = nn.CrossEntropyLoss(ignore_index=TRG_PAD_IDX)

    def epoch_time(start_time, end_time):
        elapsed_time = end_time - start_time
        elapsed_mins = int(elapsed_time / 60)
        elapsed_secs = int(elapsed_time - (elapsed_mins * 60))
        return elapsed_mins, elapsed_secs

    # "args" is assumed to be an argparse.Namespace parsed at module level in the original script
    if args.action == 'train':
        best_valid_loss = float('inf')

        for epoch in tqdm(range(config.N_EPOCHS)):

            start_time = time.time()

            train_loss = config.train(model_tr, train_iterator, optimizer,
                                      criterion, config.CLIP)
            valid_loss = config.evaluate(model_tr, valid_iterator, criterion)

            end_time = time.time()

            epoch_mins, epoch_secs = epoch_time(start_time, end_time)

            if valid_loss < best_valid_loss:
                best_valid_loss = valid_loss
                torch.save(model_tr.state_dict(), 'model.bin')

            with open(config.RESULTS_SAVE_FILE, 'a') as f:
                print(
                    f'Epoch: {epoch+1:02} | Time: {epoch_mins}m {epoch_secs}s',
                    file=f)
                print(
                    f'\tTrain Loss: {train_loss:.3f} | Train PPL: {math.exp(train_loss):7.3f}',
                    file=f)
                print(
                    f'\t Val. Loss: {valid_loss:.3f} |  Val. PPL: {math.exp(valid_loss):7.3f}',
                    file=f)

    elif args.action == 'test':
        model_tr.load_state_dict(torch.load('model.bin'))

        test_loss, t, o = engine.test(model_tr, test_iterator, criterion, TRG)

        metric_val = 0

        for i in range(len(t)):
            metric_val = metric_val + metric_utils.compute_metric(
                o[i], 1.0, t[i])

        print('Nl2Cmd Metric  | ', metric_val / len(t))

        print(
            f'| Test Loss: {test_loss:.3f} | Test PPL: {math.exp(test_loss):7.3f} |'
        )

    elif args.action == 'save_vocab':
        print('Source Vocab Length', len(SRC.vocab))
        print('Target vocab length', len(TRG.vocab))
        s1 = '\n'.join(k for k in SRC.vocab.itos)
        s2 = '\n'.join(k for k in TRG.vocab.itos)
        with open('NL_vocabulary.txt', 'w') as f:
            f.write(s1)
        with open('Bash_vocabulary.txt', 'w') as f:
            f.write(s2)
Example #11
def _train(num_epochs, resume, net, device, directory, trainloader,
           testloader):
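    # TensorBoard writer used to log per-epoch losses and accuracies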
    writer = SummaryWriter()

    net = net.to(device)
    if device == 'cuda':
        net = torch.nn.DataParallel(net)
        cudnn.benchmark = True

    criterion = torch.nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(),
                          lr=0.1,
                          momentum=0.9,
                          weight_decay=1e-3)  # usual 5e-4

    # load from checkpoint
    if resume:
        print('==> Resuming from checkpoint...')
        checkpoint = torch.load(f'{directory}/last_ckpt.pth')
        net.load_state_dict(checkpoint['net'])
        optimizer.load_state_dict(checkpoint['optim'])
        start_epoch = checkpoint['epoch'] + 1
        best_acc = torch.load(f'{directory}/best_ckpt.pth')['acc']
        scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer,
            milestones=[60, 70, 75, 80, 85, 95],
            gamma=0.1,
            verbose=True,
            last_epoch=checkpoint['epoch'])

    # create history file
    else:
        best_acc = 0
        start_epoch = 0
        scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer, milestones=[60, 80, 100, 150], gamma=0.1, verbose=True)
        with open(f"{directory}/history.txt", "w") as f:
            f.write("epoch,train_loss,train_acc,test_loss,test_acc\n")

    # loop through number of epochs
    for epoch in range(start_epoch, num_epochs):
        train_loss, train_acc = train_one_epoch(net, optimizer, criterion,
                                                trainloader, epoch, device)
        test_loss, test_acc = test(net, criterion, testloader, device)

        # save losses and accuracies into txt file
        with open(f"{directory}/history.txt", "a") as f:
            f.write(
                f"{epoch},{train_loss:.4f},{train_acc:.4f},{test_loss:.4f},{test_acc:.4f}\n"
            )

        # save checkpoint
        state = {
            'net': net.state_dict(),
            'acc': test_acc,
            'epoch': epoch,
            'optim': optimizer.state_dict()
        }
        torch.save(state, f'{directory}/last_ckpt.pth')
        if test_acc > best_acc:
            torch.save(state, f'{directory}/best_ckpt.pth')
            best_acc = test_acc

        scheduler.step()

        writer.add_scalar('train/loss', train_loss, epoch)
        writer.add_scalar('test/loss', test_loss, epoch)
        writer.add_scalar('train/acc', train_acc, epoch)
        writer.add_scalar('test/acc', test_acc, epoch)

    writer.close()
Example #12
import torch
import torch.backends.cudnn as cudnn

import cfg  # assumed: the project's config module defining CLASSES_TO_RUN (used below)
from model import ResNet50
from datasets import BinaryDatasets, MultiwaySubDatasets, MultiwayDatasets
from engine import test

_, testloader = BinaryDatasets(['cat'])
# _, testloader = BinaryDatasets(['bird'], ['cat', 'deer', 'dog', 'frog', 'horse'])
# _, testloader = MultiwaySubDatasets(['airplane', 'automobile', 'ship', 'truck'])

net = ResNet50(2)
net.to("cuda")
net = torch.nn.DataParallel(net)
cudnn.benchmark = True
net.eval()

# load each per-class checkpoint and evaluate it on the test loader built above
for class_ in cfg.CLASSES_TO_RUN:
    ckpt = torch.load(f"checkpoints/10way/{class_}/last_ckpt.pth")

    net.load_state_dict(ckpt['net'])

    criterion = torch.nn.CrossEntropyLoss()

    # neg = cfg.CLASSES_TO_RUN.copy()
    # neg.remove(class_)
    # _, testloader = BinaryDatasets([class_], neg)

    test_loss, acc = test(net, criterion, testloader, "cuda")

    print(f"{acc:.4f}")
Example #13
lsepLoss = losses.LSEPLoss()
rankingLoss = torch.nn.MarginRankingLoss(margin=args.margin)
criterion = [
    mseLoss, bceLoss, rankingLoss, tripletLoss, warpLoss, lsepLoss, ceLoss
]

# optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                    model.parameters()),
                             lr=args.lr,
                             amsgrad=True)

bestAcc = 0
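# train and evaluate once per epoch; a checkpoint is saved every epoch and the best accuracy is flagged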
for epoch in range(startEpoch, args.epochs + 1):
    engine.train(trainLoader, model, criterion, optimizer, epoch, plotter,
                 writer, args.log, classCount, args.consiW, args.alignW,
                 args.visEncW, args.labEncW, args.rankW, device, args.topK,
                 args.theta)
    acc = engine.test(testLoader, model, criterion, epoch, plotter, writer,
                      classCount, args.consiW, args.alignW, args.visEncW,
                      args.labEncW, args.rankW, device, args.topK, args.theta)

    # remember best acc and save checkpoint
    is_best = acc > bestAcc
    bestAcc = max(acc, bestAcc)
    utils.save_checkpoint(
        {
            'epoch': epoch + 1,
            'stateDict': model.state_dict(),
            'perfMetric': bestAcc,
        }, is_best, args.name)
Example #14
from ui import cursesUI, pygameUI, terminalUI
import argparse
import sys

import logging
logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()
parser.add_argument('--ui',
                    help="Choose the output UI",
                    type=str,
                    choices=['terminal', 'curses', 'pygame'],
                    default="curses")
parser.add_argument('--test', type=str, choices=['engine'])

if __name__ == '__main__':
    logging.basicConfig(
        filename="Hoplite.log",
        format="%(module)s:%(lineno)d %(levelname)s %(message)s",
        level="DEBUG")
    results = parser.parse_args(sys.argv[1:])
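    # dispatch on the parsed options: launch the curses UI or run the engine self-test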
    if results.ui == 'curses':
        cursesUI()
    elif results.test == "engine":
        import engine
        engine.test()