def train(net, training_inputs, training_labels, test_inputs, test_labels, EPOCHS, l_rate, BATCH_SIZE):
	net.train()
	optimiser = optim.Adam(net.parameters(), lr = l_rate)  # net.parameters(): all of the adjustable parameters in our network. lr: a hyperparameter that sets the size of the step the optimizer takes to minimise the loss.
	loss_function = nn.MSELoss(reduction='mean')

	X = Variable(torch.Tensor(training_inputs))
	y = Variable(torch.Tensor(training_labels))

	E_va_list = []
	GL_MAX = 3
	for epoch in range(EPOCHS):
		for i in tqdm(range(0, len(X), BATCH_SIZE)):
			batch_X = X[i:i+BATCH_SIZE]
			batch_y = y[i:i+BATCH_SIZE]
			hidden = net.init_hidden(batch_X)
			optimiser.zero_grad()
			outputs, _ = net(batch_X, hidden)
			loss = loss_function(outputs, batch_y)
			loss.backward()
			optimiser.step()

		E_va = test(test_inputs, test_labels, net)
		E_va_list.append(E_va)

		GL = 100*((E_va/min(E_va_list)) - 1)
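		# GL ("generalisation loss"): percentage by which this epoch's validation error exceeds the best error seen so far; training stops once it passes GL_MAX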

		if GL > GL_MAX:
			return min(E_va_list), (E_va_list.index(min(E_va_list)) + 1)

	return min(E_va_list), (E_va_list.index(min(E_va_list)) + 1)
def show_pre_res(self):
    res = ''
    if 'train1' in self.model_path:
        res = test1.test(self.pic_path_pro, self.model_path)
    elif 'train2' in self.model_path:
        res = test2.test(self.pic_path_pro, self.model_path)
    elif 'train3' in self.model_path:
        res = test3.test(self.pic_path_pro, self.model_path)
    self.show_res_text.setText(res)
EPOCHS = 100
BATCH_SIZE = 50
LR = 0.0007

# Instantiate the network and prepare data
avg_mse = 1
while avg_mse > 0.9:
    net = Net(HN1, HN2)
    training_inputs = training_data[:, 0:4]
    training_labels = training_data[:, 4:]
    test_inputs = testing_data[:, 0:4]
    test_labels = testing_data[:, 4:]

    # Train and test the network
    train(net, training_inputs, training_labels, test_inputs, test_labels,
          EPOCHS, LR, BATCH_SIZE)
    avg_mse, predictions_online, predictions_offline = test(
        test_inputs, test_labels, net)
    print(avg_mse)

predictions_online_inverse_transform = scaler_test.inverse_transform(
    predictions_online)
predictions_offline_inverse_transform = scaler_test.inverse_transform(
    predictions_offline)

online = pd.DataFrame(predictions_online_inverse_transform)
offline = pd.DataFrame(predictions_offline_inverse_transform)
avg_mse = pd.DataFrame([avg_mse, 0])

online.to_excel(
    'Data3/Optimised_Networks/manual_online3 {x}_{y}-{z}_{a}_{b}_{c}.xlsx'.
    format(x=HL, y=HN1, z=HN2, a=EPOCHS, b=LR, c=BATCH_SIZE))
offline.to_excel(
Example #4
import csv
import random
import numpy as np
import math

from test2 import la
from test2 import laa
from test2 import convtest
from test2 import test

test()
with open(r'index_10\input10.csv', newline='') as csvfile, \
     open(r'index_10\conv1.weight10.csv', newline='') as csvfile2:
    rows = csv.reader(csvfile, delimiter=',')
    inputdata = np.asarray(list(rows))  # input = 32*32*3
    # print("len of (input.csv) = ", len(inputdata), "type = ", type(inputdata))
    rowss = csv.reader(csvfile2, delimiter=',')
    conv1weight = np.asarray(list(rowss))

# new array for input R G B
number = int(inputdata.shape[0])
single = int(inputdata.shape[0] / 3)  # 1024 = 32^2
l = int(math.sqrt(single))  # side length of the square input: 32
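# one flattened 1 x `single` buffer per colour channel (R, G, B)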
i_r = np.zeros((1, single))
i_g = np.zeros((1, single))
i_b = np.zeros((1, single))

# print("i_r = ",i_r.size)
# print("single = ",single)
# print("number = ",number)
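The snippet above stops after allocating the per-channel buffers. A minimal sketch of how the flat CSV data could be split into the three planes, assuming the file stores the R, G and B blocks consecutively (it reuses the variables already defined; the continuation itself is an illustration, not part of the original example):

# hypothetical continuation: split the flat (3 * 32 * 32) input into channel planes
flat = inputdata.astype(float).reshape(-1)
i_r[0, :] = flat[0:single]              # first block  -> red channel
i_g[0, :] = flat[single:2 * single]     # second block -> green channel
i_b[0, :] = flat[2 * single:]           # third block  -> blue channel
r_plane = i_r.reshape(l, l)             # 32 x 32 red plane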
Example #5
def train():
    cfg = opt.cfg
    data = opt.data
    img_size = opt.img_size
    epochs = 1 if opt.prebias else opt.epochs  # 500200 batches at bs 64, 117263 images = 273 epochs
    batch_size = opt.batch_size
    accumulate = opt.accumulate  # effective bs = batch_size * accumulate = 16 * 4 = 64
    weights = opt.weights  # initial training weights

    if 'pw' not in opt.arc:  # remove BCELoss positive weights
        hyp['cls_pw'] = 1.
        hyp['obj_pw'] = 1.

    # Initialize
    init_seeds()
    multi_scale = opt.multi_scale

    if multi_scale:
        img_sz_min = round(img_size / 32 / 1.5) + 1
        img_sz_max = round(img_size / 32 * 1.5) - 1
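        # with img_size=416 this gives img_sz_min=10 (320 px) and img_sz_max=19 (608 px)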
        img_size = img_sz_max * 32  # initiate with maximum multi_scale size
        print('Using multi-scale %g - %g' % (img_sz_min * 32, img_size))

    # Configure run
    data_dict = parse_data_cfg(data)
    train_path = data_dict['train']
    nc = int(data_dict['classes'])  # number of classes

    # Remove previous results
    for f in glob.glob('*_batch*.jpg') + glob.glob(results_file):
        os.remove(f)

    # Initialize model
    model = Darknet(cfg, arc=opt.arc).to(device)

    # Optimizer
    pg0, pg1 = [], []  # optimizer parameter groups
    for k, v in dict(model.named_parameters()).items():
        if 'Conv2d.weight' in k:
            pg1 += [v]  # parameter group 1 (apply weight_decay)
        else:
            pg0 += [v]  # parameter group 0

    if opt.adam:
        optimizer = optim.Adam(pg0, lr=hyp['lr0'])
        # optimizer = AdaBound(pg0, lr=hyp['lr0'], final_lr=0.1)
    else:
        optimizer = optim.SGD(pg0,
                              lr=hyp['lr0'],
                              momentum=hyp['momentum'],
                              nesterov=True)
    optimizer.add_param_group({
        'params': pg1,
        'weight_decay': hyp['weight_decay']
    })  # add pg1 with weight_decay
    del pg0, pg1

    cutoff = -1  # backbone reaches to cutoff layer
    start_epoch = 0
    best_fitness = 0.
    attempt_download(weights)
    if weights.endswith('.pt'):  # pytorch format
        # possible weights are 'last.pt', 'yolov3-spp.pt', 'yolov3-tiny.pt' etc.
        if opt.bucket:
            os.system('gsutil cp gs://%s/last.pt %s' %
                      (opt.bucket, last))  # download from bucket
        chkpt = torch.load(weights, map_location=device)

        # load model
        # if opt.transfer:
        chkpt['model'] = {
            k: v
            for k, v in chkpt['model'].items()
            if model.state_dict()[k].numel() == v.numel()
        }
        model.load_state_dict(chkpt['model'], strict=False)
        # else:
        #    model.load_state_dict(chkpt['model'])

        # load optimizer
        if chkpt['optimizer'] is not None:
            optimizer.load_state_dict(chkpt['optimizer'])
            best_fitness = chkpt['best_fitness']

        # load results
        if chkpt.get('training_results') is not None:
            with open(results_file, 'w') as file:
                file.write(chkpt['training_results'])  # write results.txt

        start_epoch = chkpt['epoch'] + 1
        del chkpt

    elif len(weights) > 0:  # darknet format
        # possible weights are 'yolov3.weights', 'yolov3-tiny.conv.15',  'darknet53.conv.74' etc.
        cutoff = load_darknet_weights(model, weights)

    if opt.transfer or opt.prebias:  # transfer learning edge (yolo) layers
        nf = int(model.module_defs[model.yolo_layers[0] -
                                   1]['filters'])  # yolo layer size (i.e. 255)

        if opt.prebias:
            for p in optimizer.param_groups:
                # lower param count allows more aggressive training settings: i.e. SGD ~0.1 lr0, ~0.9 momentum
                p['lr'] *= 100  # lr gain
                if p.get('momentum') is not None:  # for SGD but not Adam
                    p['momentum'] *= 0.9

        for p in model.parameters():
            if opt.prebias and p.numel() == nf:  # train (yolo biases)
                p.requires_grad = True
            elif opt.transfer and p.shape[
                    0] == nf:  # train (yolo biases+weights)
                p.requires_grad = True
            else:  # freeze layer
                p.requires_grad = False

    # Scheduler https://github.com/ultralytics/yolov3/issues/238
    # lf = lambda x: 1 - x / epochs  # linear ramp to zero
    # lf = lambda x: 10 ** (hyp['lrf'] * x / epochs)  # exp ramp
    # lf = lambda x: 1 - 10 ** (hyp['lrf'] * (1 - x / epochs))  # inverse exp ramp
    # scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
    # scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=range(59, 70, 1), gamma=0.8)  # gradual fall to 0.1*lr0
    scheduler = lr_scheduler.MultiStepLR(
        optimizer,
        milestones=[round(opt.epochs * x) for x in [0.8, 0.9]],
        gamma=0.1)
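    # align the LR schedule with the (possibly resumed) starting epoch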
    scheduler.last_epoch = start_epoch - 1

    # # Plot lr schedule
    # y = []
    # for _ in range(epochs):
    #     scheduler.step()
    #     y.append(optimizer.param_groups[0]['lr'])
    # plt.plot(y, label='LambdaLR')
    # plt.xlabel('epoch')
    # plt.ylabel('LR')
    # plt.tight_layout()
    # plt.savefig('LR.png', dpi=300)

    # Mixed precision training https://github.com/NVIDIA/apex
    if mixed_precision:
        model, optimizer = amp.initialize(model,
                                          optimizer,
                                          opt_level='O1',
                                          verbosity=0)

    # Initialize distributed training
    if torch.cuda.device_count() > 1:
        dist.init_process_group(
            backend='nccl',  # 'distributed backend'
            init_method=
            'tcp://127.0.0.1:9999',  # distributed training init method
            world_size=1,  # number of nodes for distributed training
            rank=0)  # distributed training node rank
        model = torch.nn.parallel.DistributedDataParallel(model)
        model.yolo_layers = model.module.yolo_layers  # move yolo layer indices to top level

    # Dataset
    dataset = LoadImagesAndLabels(
        train_path,
        img_size,
        batch_size,
        augment=True,
        hyp=hyp,  # augmentation hyperparameters
        rect=opt.rect,  # rectangular training
        image_weights=opt.img_weights,
        cache_labels=True if epochs > 10 else False,
        cache_images=False if opt.prebias else opt.cache_images)

    # Dataloader
    dataloader = torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        num_workers=min([os.cpu_count(), batch_size, 16]),
        shuffle=not opt.rect,  # Shuffle=True unless rectangular training is used
        pin_memory=True,
        collate_fn=dataset.collate_fn)

    # Start training
    model.nc = nc  # attach number of classes to model
    model.arc = opt.arc  # attach yolo architecture
    model.hyp = hyp  # attach hyperparameters to model
    # model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device)  # attach class weights   ##########
    torch_utils.model_info(model, report='summary')  # 'full' or 'summary'
    nb = len(dataloader)
    maps = np.zeros(nc)  # mAP per class
    results = (
        0, 0, 0, 0, 0, 0, 0
    )  # 'P', 'R', 'mAP', 'F1', 'val GIoU', 'val Objectness', 'val Classification'
    t0 = time.time()
    print('Starting %s for %g epochs...' %
          ('prebias' if opt.prebias else 'training', epochs))
    for epoch in range(
            start_epoch, epochs
    ):  # epoch ------------------------------------------------------------------
        model.train()
        print(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'GIoU', 'obj', 'cls',
                                     'total', 'targets', 'img_size'))

        # Freeze backbone at epoch 0, unfreeze at epoch 1 (optional)
        freeze_backbone = False
        if freeze_backbone and epoch < 2:
            for name, p in model.named_parameters():
                if int(name.split('.')[1]) < cutoff:  # if layer < 75
                    p.requires_grad = False if epoch == 0 else True

        # Update image weights (optional)
        if dataset.image_weights:
            w = model.class_weights.cpu().numpy() * (1 -
                                                     maps)**2  # class weights
            image_weights = labels_to_image_weights(dataset.labels,
                                                    nc=nc,
                                                    class_weights=w)
            dataset.indices = random.choices(range(dataset.n),
                                             weights=image_weights,
                                             k=dataset.n)  # rand weighted idx

        mloss = torch.zeros(4).to(device)  # mean losses
        pbar = tqdm(enumerate(dataloader), total=nb)  # progress bar
        for i, (
                imgs, targets, paths, _
        ) in pbar:  # batch -------------------------------------------------------------
            ni = i + nb * epoch  # number integrated batches (since train start)
            imgs = imgs.to(device)
            targets = targets.to(device)

            # Multi-Scale training
            if multi_scale:
                if ni / accumulate % 10 == 0:  #  adjust (67% - 150%) every 10 batches
                    img_size = random.randrange(img_sz_min,
                                                img_sz_max + 1) * 32
                sf = img_size / max(imgs.shape[2:])  # scale factor
                if sf != 1:
                    ns = [
                        math.ceil(x * sf / 32.) * 32 for x in imgs.shape[2:]
                    ]  # new shape (stretched to 32-multiple)
                    imgs = F.interpolate(imgs,
                                         size=ns,
                                         mode='bilinear',
                                         align_corners=False)

            # Plot images with bounding boxes
            if ni == 0:
                fname = 'train_batch%g.jpg' % i
                plot_images(imgs=imgs,
                            targets=targets,
                            paths=paths,
                            fname=fname)
                if tb_writer:
                    tb_writer.add_image(fname,
                                        cv2.imread(fname)[:, :, ::-1],
                                        dataformats='HWC')

            # Hyperparameter burn-in
            # n_burn = nb - 1  # min(nb // 5 + 1, 1000)  # number of burn-in batches
            # if ni <= n_burn:
            #     for m in model.named_modules():
            #         if m[0].endswith('BatchNorm2d'):
            #             m[1].momentum = 1 - i / n_burn * 0.99  # BatchNorm2d momentum falls from 1 - 0.01
            #     g = (i / n_burn) ** 4  # gain rises from 0 - 1
            #     for x in optimizer.param_groups:
            #         x['lr'] = hyp['lr0'] * g
            #         x['weight_decay'] = hyp['weight_decay'] * g

            # Run model
            pred = model(imgs)

            # Compute loss
            loss, loss_items = compute_loss(pred, targets, model)
            if not torch.isfinite(loss):
                print('WARNING: non-finite loss, ending training ', loss_items)
                return results

            # Scale loss by nominal batch_size of 64
            loss *= batch_size / 64

            # Compute gradient
            if mixed_precision:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()

            # Accumulate gradient for x batches before optimizing
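            # e.g. batch_size=16 with accumulate=4 steps the optimizer once every 64 images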
            if ni % accumulate == 0:
                optimizer.step()
                optimizer.zero_grad()

            # Print batch results
            mloss = (mloss * i + loss_items) / (i + 1)  # update mean losses
            mem = torch.cuda.memory_cached() / 1E9 if torch.cuda.is_available(
            ) else 0  # (GB)
            s = ('%10s' * 2 + '%10.3g' * 6) % ('%g/%g' % (epoch, epochs - 1),
                                               '%.3gG' % mem, *mloss,
                                               len(targets), img_size)
            pbar.set_description(s)

            # end batch ------------------------------------------------------------------------------------------------

        # Update scheduler
        scheduler.step()

        # Process epoch results
        final_epoch = epoch + 1 == epochs
        if opt.prebias:
            print_model_biases(model)
        else:
            # Calculate mAP (always test final epoch, skip first 10 if opt.nosave)
            if not (opt.notest or (opt.nosave and epoch < 10)) or final_epoch:
                with torch.no_grad():
                    results, maps = test2.test(
                        cfg,
                        data,
                        batch_size=batch_size,
                        img_size=opt.img_size,
                        model=model,
                        conf_thres=0.001
                        if final_epoch and epoch > 0 else 0.1,  # 0.1 for speed
                        save_json=final_epoch and epoch > 0
                        and 'coco.data' in data)

        # Write epoch results
        with open(results_file, 'a') as f:
            f.write(s + '%10.3g' * 7 % results +
                    '\n')  # P, R, mAP, F1, test_losses=(GIoU, obj, cls)

        # Write Tensorboard results
        if tb_writer:
            x = list(mloss) + list(results)
            titles = [
                'GIoU', 'Objectness', 'Classification', 'Train loss',
                'Precision', 'Recall', 'mAP', 'F1', 'val GIoU',
                'val Objectness', 'val Classification'
            ]
            for xi, title in zip(x, titles):
                tb_writer.add_scalar(title, xi, epoch)

        # Update best mAP
        fitness = results[2]  # mAP
        if fitness > best_fitness:
            best_fitness = fitness

        # Save training results
        save = (not opt.nosave) or (final_epoch
                                    and not opt.evolve) or opt.prebias
        if save:
            with open(results_file, 'r') as f:
                # Create checkpoint
                chkpt = {
                    'epoch':
                    epoch,
                    'best_fitness':
                    best_fitness,
                    'training_results':
                    f.read(),
                    'model':
                    model.module.state_dict()
                    if type(model) is nn.parallel.DistributedDataParallel else
                    model.state_dict(),
                    'optimizer':
                    None if final_epoch else optimizer.state_dict()
                }

            # Save last checkpoint
            torch.save(chkpt, last)
            if opt.bucket and not opt.prebias:
                os.system('gsutil cp %s gs://%s' %
                          (last, opt.bucket))  # upload to bucket

            # Save best checkpoint
            if best_fitness == fitness:
                torch.save(chkpt, best)

            # Save backup every 10 epochs (optional)
            if epoch > 0 and epoch % 10 == 0:
                torch.save(chkpt, wdir + 'backup%g_retrain.pt' % epoch)

            # Delete checkpoint
            del chkpt

        # end epoch ----------------------------------------------------------------------------------------------------

    # end training
    if len(opt.name):
        os.rename('results.txt', 'results_%s.txt' % opt.name)
    plot_results()  # save as results.png
    print('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1,
                                                    (time.time() - t0) / 3600))
    dist.destroy_process_group() if torch.cuda.device_count() > 1 else None
    torch.cuda.empty_cache()
    return results
Example #6
async def main():
    a = glob.glob('test*')
    print(a)
    tasks = [asyncio.ensure_future(test1.test()), asyncio.ensure_future(test2.test())]
    await asyncio.gather(*tasks)
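The coroutine above still needs an event loop to drive it; a minimal way to run it, assuming Python 3.7+ (and that test1.test() and test2.test() are awaitable, as the example implies):

import asyncio

asyncio.run(main())  # runs both test coroutines concurrently and waits for them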
Example #7
init_state = copy.deepcopy(rnn.state_dict())
for lr in LR:
    MSEs = []
    for index, subset in enumerate(subset_train_list):
        subset.value = np.array(subset.value)
        subset_test_list[index].value = np.array(subset_test_list[index].value)

        rnn.load_state_dict(init_state)
        training_inputs = subset.value[:, 0:5]
        training_labels = subset.value[:, 5:]
        test_inputs = subset_test_list[index].value[:, 0:5]
        test_labels = subset_test_list[index].value[:, 5:]

        training_inputs = np.split(training_inputs, 505)
        training_labels = np.split(training_labels, 505)

        test_inputs = np.array([test_inputs])
        test_labels = np.array([test_labels])
        
        train(rnn, training_inputs, training_labels, EPOCHS, lr, BATCH_SIZE)
        avg_mse = test(test_inputs, test_labels, rnn)
        MSEs.append(avg_mse)

    avg_mse = sum(MSEs)/len(MSEs)
    MODELS['{a}_{x}_{z}_{b}'.format(a=HL, x=HN1, z=EPOCHS, b=lr)] = avg_mse

with open('Data2/Search/k_fold_results_{x}HL_lr.csv'.format(x=HL), 'w') as f:
    for key in MODELS.keys():
        f.write("%s: %s\n"%(key, MODELS[key]))

print(MODELS)
Example #8
def getTest():
    source = request.form.get('source')
    return test(source)
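The handler above is shown without its surrounding wiring; a hedged sketch of how such a route is typically registered, assuming Flask (the app object, URL and HTTP method are assumptions, not part of the original example):

from flask import Flask, request

app = Flask(__name__)

@app.route('/getTest', methods=['POST'])  # hypothetical route registration
def getTest():
    source = request.form.get('source')
    return test(source)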
Example #9
import global_var as gl
import test2

print(gl.point_cnt)
gl.point_cnt = 5555
test2.test()
print(gl.point_cnt)
gl.point_cnt = 6666
test2.test()
print(gl.point_cnt)
Example #10
def train(cfg, args):

    device = torch.device(cfg.MODEL.DEVICE)
    outdir = cfg.OUTPUT_DIR
    '''def collate_fn_padd(batch):
        print(batch)
        lengths = torch.tensor([ t.shape[0] for t in batch ]).to(device)
        batch = [ torch.Tensor(t).to(device) for t in batch ]
        batch = torch.nn.utils.rnn.pad_sequence(batch)
        mask = (batch != 0).to(device)
        return new_batch, lengths, mask'''

    # Initialize the network
    model = baseline(cfg, is_cat=args.is_cat)
    class_weights = [1, 1, 5, 5]  # could be adjusted
    class_weights = torch.FloatTensor(class_weights).to(device)
    criterion = nn.CrossEntropyLoss(weight=class_weights)

    # Initialize optimizer
    # optimizer = optim.SGD(model.parameters(), lr=float(args.initLR), momentum=0.9, weight_decay=args.weight_decay)
    optimizer = optim.Adam(model.parameters(),
                           lr=float(args.initLR),
                           weight_decay=float(args.weight_decay))

    # Initialize image batch
    # imBatch = Variable(torch.FloatTensor(args.batch_size, 1024, 14, 14))
    targetBatch = Variable(torch.LongTensor(args.batch_size))

    # Move network and batch to gpu
    # imBatch = imBatch.cuda(device)
    targetBatch = targetBatch.cuda(device)
    model = model.cuda(device)
    print(model)

    # Initialize dataloader
    Dataset = BatchLoader(
        imageRoot=args.imageroot,
        gtRoot=args.gtroot,
        #cropSize=(args.imWidth, args.imHeight)
    )
    # dataloader = DataLoader(Dataset, batch_size=args.batch_size, num_workers=0, shuffle=True, collate_fn=collate_fn_padd)
    dataloader = DataLoader(Dataset,
                            batch_size=args.batch_size,
                            num_workers=0,
                            shuffle=True)

    lossArr = []
    AccuracyArr = []
    accuracy = 0
    iteration = 0

    for epoch in range(0, 100):
        trainingLog = open(outdir + ('trainingLog_{0}.txt'.format(epoch)), 'w')
        accuracy = 0
        trainingLog.write(str(args))
        for i, dataBatch in enumerate(dataloader):
            iteration = i + 1
            #print(dataBatch)

            # Read data, under construction
            img_cpu = dataBatch['img'][0, :]
            N = img_cpu.shape[0]
            imBatch = Variable(torch.FloatTensor(N, 1024, 14, 14))
            imBatch = imBatch.cuda(device)
            # if args.batch_size == 1:
            #     img_list = to_image_list(img_cpu[0,:,:], cfg.DATALOADER.SIZE_DIVISIBILITY)
            # else:
            #     img_list = to_image_list(img_cpu, cfg.DATALOADER.SIZE_DIVISIBILITY)
            # print(cfg.DATALOADER.SIZE_DIVISIBILITY)
            # img_list = to_image_list(img_cpu, cfg.DATALOADER.SIZE_DIVISIBILITY)
            # img_list = to_image_list(img_cpu)
            imBatch.data.copy_(
                img_cpu)  # Tensor.shape(BatchSize, 3, Height, Width)

            target_cpu = dataBatch['target']
            # print(target_cpu)
            targetBatch.data.copy_(target_cpu)
            #print(imBatch.shape)
            #print(targetBatch.shape)

            # Train network
            optimizer.zero_grad()

            # pred = model(features_roi, features_backbone)
            pred = model(imBatch)

            # print('target:', targetBatch[0,:][0])
            loss = criterion(pred, targetBatch)
            action = pred.cpu().argmax(dim=1).data.numpy()

            loss.backward()

            optimizer.step()
            accuracy += np.sum(action == targetBatch.cpu().data.numpy())

            lossArr.append(loss.cpu().data.item())
            AccuracyArr.append(accuracy / iteration / args.batch_size)

            meanLoss = np.mean(np.array(lossArr))
            if iteration % 100 == 0:
                print('prediction:', pred)
                print('predicted action:', action)
                print('ground truth:', targetBatch.cpu().data.numpy())
                print(
                    'Epoch %d Iteration %d: Loss %.5f Accumulated Loss %.5f' %
                    (epoch, iteration, lossArr[-1], meanLoss))

                trainingLog.write(
                    'Epoch %d Iteration %d: Loss %.5f Accumulated Loss %.5f \n'
                    % (epoch, iteration, lossArr[-1], meanLoss))

                print('Epoch %d Iteration %d: Accumulated Accuracy %.5f' %
                      (epoch, iteration, AccuracyArr[-1]))
                trainingLog.write(
                    'Epoch %d Iteration %d: Accumulated Accuracy %.5f \n' %
                    (epoch, iteration, AccuracyArr[-1]))

            if epoch in [50, 70] and iteration == 1:
                print('The learning rate is being decreased at Iteration %d' %
                      iteration)
                trainingLog.write(
                    'The learning rate is being decreased at Iteration %d \n' %
                    iteration)
                for param_group in optimizer.param_groups:
                    param_group['lr'] /= 10

            if iteration == args.MaxIteration and epoch % 5 == 0:
                torch.save(model.state_dict(),
                           (outdir + 'netFinal_%d.pth' % (epoch + 1)))
                break

        if iteration >= args.MaxIteration:
            break

        if (epoch + 1) % 5 == 0:
            torch.save(model.state_dict(),
                       (outdir + 'netFinal_%d.pth' % (epoch + 1)))
        if args.val and epoch % 10 == 0:
            print("validation")
            test(cfg, args)
Example #11
import test2
from test2 import test


test(50)

test2.i = 2000
test(33)

test2.i = "testing"
test(None)

del test2.i
test("no i")
Example #12
def train(cfg, args):
    # torch.cuda.set_device(5)

    # device = torch.device(cfg.MODEL.DEVICE)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    outdir = cfg.OUTPUT_DIR
    '''def collate_fn_padd(batch):
        print(batch)
        lengths = torch.tensor([ t.shape[0] for t in batch ]).to(device)
        batch = [ torch.Tensor(t).to(device) for t in batch ]
        batch = torch.nn.utils.rnn.pad_sequence(batch)
        mask = (batch != 0).to(device)
        return new_batch, lengths, mask'''

    # Initialize the network
    model = baseline(cfg, is_cat=args.is_cat)
    print(model)
    model.train()
    class_weights = [1, 1]  # could be adjusted
    class_weights = torch.FloatTensor(class_weights).to(device)
    criterion = nn.CrossEntropyLoss(weight=class_weights).cuda()

    # Initialize optimizer
    #optimizer = optim.SGD(model.parameters(), lr=float(args.initLR), momentum=0.9, weight_decay=0.001)
    optimizer = optim.Adam(model.parameters(),
                           lr=float(args.initLR),
                           weight_decay=0.0005)

    # Initialize image batch
    # imBatch = Variable(torch.FloatTensor(args.batch_size, 1024, 14, 14))
    targetBatch = Variable(torch.LongTensor(args.batch_size))

    # Move network and batch to gpu
    # imBatch = imBatch.cuda(device)
    # targetBatch = targetBatch.cuda(device)
    model = model.to(device)
    #print(model)

    # Initialize dataloader
    Dataset = BatchLoader(
        imageRoot=args.imageroot,
        gtRoot=args.gtroot,
        #cropSize=(args.imWidth, args.imHeight)
    )
    #dataloader = DataLoader(Dataset, batch_size=args.batch_size, num_workers=0, shuffle=True, collate_fn=collate_fn_padd)
    dataloader = DataLoader(Dataset,
                            batch_size=args.batch_size,
                            num_workers=4,
                            shuffle=True)

    iteration = 0

    print('Size of the training set:', len(dataloader))
    for epoch in range(0, args.num_epoch):
        trainingLog = open(outdir + ('trainingLog_{0}.txt'.format(epoch)), 'w')
        lossArr = []
        #accuracy = 0
        AccuracyArr = []
        trainingLog.write(str(args))
        for i, dataBatch in enumerate(dataloader):
            iteration = i + 1
            # print(i)
            #print(dataBatch)

            # Read data, under construction
            img_cpu = dataBatch['img'][0]
            imBatch = img_cpu.to(device)

            target_cpu = dataBatch['target']
            targetBatch = target_cpu.to(device)

            # Train network
            optimizer.zero_grad()
            pred = model(imBatch)

            # print('target:', targetBatch[0,:][0])
            loss = criterion(pred, targetBatch)
            action = pred.cpu().argmax(dim=1).data.numpy()

            loss.backward()

            optimizer.step()
            accuracy = np.sum(action == targetBatch.cpu().data.numpy())

            lossArr.append(loss.cpu().data.item())
            AccuracyArr.append(accuracy / args.batch_size)

            meanLoss = np.mean(np.array(lossArr))
            meanAcc = np.mean(np.array(AccuracyArr))
            if iteration % 100 == 0:
                print('prediction:', pred)
                print('predicted action:', action)
                print('ground truth:', targetBatch.cpu().data.numpy())
                print(
                    'Epoch %d Iteration %d: Loss %.5f Accumulated Loss %.5f' %
                    (epoch, iteration, lossArr[-1], meanLoss))

                trainingLog.write(
                    'Epoch %d Iteration %d: Loss %.5f Accumulated Loss %.5f \n'
                    % (epoch, iteration, lossArr[-1], meanLoss))

                print('Epoch %d Iteration %d: Accumulated Accuracy %.5f' %
                      (epoch, iteration, meanAcc))
                trainingLog.write(
                    'Epoch %d Iteration %d: Accumulated Accuracy %.5f \n' %
                    (epoch, iteration, meanAcc))

            if epoch in [int(0.5 * args.num_epoch),
                         int(0.7 * args.num_epoch)] and iteration == 1:
                print('The learning rate is being decreased at Iteration %d' %
                      iteration)
                trainingLog.write(
                    'The learning rate is being decreased at Iteration %d \n' %
                    iteration)
                for param_group in optimizer.param_groups:
                    param_group['lr'] /= 10

        if iteration >= args.MaxIteration:
            break

        if (epoch + 1) % 5 == 0:
            torch.save(model.state_dict(),
                       (outdir + 'net_%d.pth' % (epoch + 1)))

        if args.val and (epoch + 1) % 5 == 0:
            print("validation")
            test(cfg, args)

    torch.save(model.state_dict(), (outdir + 'net_Final.pth'))