Пример #1
0
    def _tranform(self):
        """Convert the cached exemplar/instance crops to tensors.

        Reads ``self.ret['instance_img']`` and ``self.ret['exemplar_img']``
        and stores their tensor versions back into ``self.ret`` under the
        ``train_x_transforms`` / ``train_z_transforms`` keys.
        """
        x_to_tensor = transforms.Compose([ToTensor()])
        z_to_tensor = transforms.Compose([ToTensor()])
        self.ret['train_x_transforms'] = x_to_tensor(self.ret['instance_img'])
        self.ret['train_z_transforms'] = z_to_tensor(self.ret['exemplar_img'])
Пример #2
0
    def __init__(self, params, model_path = None, name='SiamRPN', **kargs):
        """Build the late-fusion SiamRPN tracker.

        Loads the network weights, precomputes the RPN anchors and the
        cosine (Hanning) penalty window, and creates the data loaders.

        Args:
            params: kept for interface compatibility (not used here).
            model_path: path to a checkpoint file; the file may contain
                either a raw state_dict or a dict with a 'model' key.
            name: tracker name forwarded to the base Tracker class.
        """
        super(TrackerSiamRPNLate, self).__init__(name=name, is_deterministic=True)

        self.model = SiameseAlexNetLate()

        self.cuda = torch.cuda.is_available()
        self.device = torch.device('cuda:0' if self.cuda else 'cpu')

        # Load the checkpoint once and reuse it; the original code called
        # torch.load() again for a state_dict it already held in memory,
        # reading the file from disk up to three times.
        checkpoint = torch.load(model_path, map_location=self.device)
        if 'model' in checkpoint:
            self.model.load_state_dict(checkpoint['model'])
        else:
            self.model.load_state_dict(checkpoint)

        if self.cuda:
            self.model = self.model.cuda()
        self.model.eval()
        self.transforms = transforms.Compose([
            ToTensor()
        ])

        valid_scope = 2 * config.valid_scope + 1
        self.anchors = util.generate_anchors(   config.total_stride,
                                                config.anchor_base_size,
                                                config.anchor_scales,
                                                config.anchor_ratios,
                                                valid_scope)
        # Cosine window replicated per anchor, flattened to match the
        # flattened score map it penalizes.
        self.window = np.tile(np.outer(np.hanning(config.score_size), np.hanning(config.score_size))[None, :],
                              [config.anchor_num, 1, 1]).flatten()

        self.data_loader = TrackerRGBTDataLoader()
        self.old_loader = TrackerDataLoader()
Пример #3
0
 def __init__(self, model_path, gpu_id):
     """Build an inference-mode SiamFC network on the given GPU.

     Args:
         model_path: path to the saved state_dict.
         gpu_id: CUDA device index the model is placed on.
     """
     self.gpu_id = gpu_id
     with torch.cuda.device(gpu_id):
         net = SiamFCNet(training=False)
         net.load_state_dict(torch.load(model_path))
         net = net.cuda()
         net.eval()
         self.model = net
     self.transforms = transforms.Compose([ToTensor()])
Пример #4
0
 def __init__(self, model_path, gpu_id):
     """Load a DataParallel-trained SiameseNet for inference on one GPU.

     Args:
         model_path: path to the saved state_dict.
         gpu_id: CUDA device index the model is placed on.
     """
     self.gpu_id = gpu_id
     with torch.cuda.device(gpu_id):
         net = SiameseNet()
         # The checkpoint was written from an nn.DataParallel-wrapped
         # feature extractor, so wrap it the same way before loading
         # to keep the state_dict key prefixes matching.
         net.features = torch.nn.DataParallel(net.features)
         net.load_state_dict(torch.load(model_path))
         net = net.cuda()
         net.eval()
         self.model = net
     self.transforms = transforms.Compose([ToTensor()])
Пример #5
0
 def __init__(self, model_path, gpu_id):
     """Load the SiamRPN net on ``gpu_id`` and precompute its anchors.

     Args:
         model_path: path to the model weights file.
         gpu_id: CUDA device index the model is placed on.
     """
     self.gpu_id = gpu_id
     with torch.cuda.device(gpu_id):
         net = SiamRPN()
         net.load_model(model_path)
         net = net.cuda()
         net.eval()
         self.model = net
     self.response_sz = config.response_sz
     # anchor boxes tiled over every location of the response map
     self.anchors = generate_anchors(config.total_stride,
                                     config.anchor_base_size,
                                     config.anchor_scales,
                                     config.anchor_ratios,
                                     self.response_sz)
     self.transforms = transforms.Compose([ToTensor()])
Пример #6
0
def main():
    """Train a SiameseNet (SiamFC) tracker.

    Creates a timestamped experiment directory, snapshots the source
    files into it, builds train/valid loaders from an lmdb-backed
    ImageNet-VID dataset, runs the SGD training loop, and saves one
    CPU checkpoint per epoch.
    """
    global args, best_prec1
    args = parser.parse_args()

    # create Experiment directories
    cwd = os.getcwd()
    siamfc = os.path.join(cwd, 'siamfc')
    training_exp = os.path.join(cwd, 'training_exp')
    time = datetime.now().strftime('%d-%m-%Y_%H-%M-%S')
    dataset_name = args.datadir.split('/')[-1]
    dataset_exp_dir = os.path.join(training_exp, dataset_name)
    new_exp_dir = os.path.join(dataset_exp_dir, time)
    # NOTE: the trailing slashes are relied upon below, where checkpoint
    # file names are appended with plain string concatenation.
    tensorboard_dir = os.path.join(new_exp_dir + '/tensorboard/')
    models_dir = os.path.join(new_exp_dir + '/models/')
    output_files = [os.path.join(cwd, 'train_siamfc.sh')]

    if not os.path.exists(new_exp_dir):
        os.makedirs(tensorboard_dir, exist_ok=True)
        os.makedirs(models_dir, exist_ok=True)
        print('New experiment folder created')

    # snapshot the training sources and collect existing .out logs
    for file in glob.glob(os.path.join(siamfc, "*.py")):
        if os.path.isfile(file):
            shutil.copyfile(file, new_exp_dir + file.split(siamfc)[1])
    for file in glob.glob(os.path.join(cwd, "*.out")):
        output_files.append(file)

    print('\n================= EXPERIMENT START TIME', time,
          '=================\n')

    # Create Tensorboard summary writer
    writer = SummaryWriter(tensorboard_dir)

    # loading meta data (with-block fixes the leaked file handle)
    meta_data_path = os.path.join(args.datadir, "meta_data.pkl")
    with open(meta_data_path, 'rb') as meta_file:
        meta_data = pickle.load(meta_file)
    videos = [x[0] for x in meta_data]
    print('Training with', args.datadir.split('/')[-1])

    # split train/valid dataset
    train_videos, valid_videos = train_test_split(videos,
                                                  test_size=1 -
                                                  config.train_ratio)

    # define transforms
    random_crop_size = config.instance_size - 2 * config.total_stride
    train_reference_transforms = transforms.Compose([
        CenterCrop((config.exemplar_size, config.exemplar_size)),
        Normalize(),
        ToTensor()
    ])
    train_search_transforms = transforms.Compose([
        RandomCrop((random_crop_size, random_crop_size), config.max_translate),
        Normalize(),
        ToTensor()
    ])
    valid_reference_transforms = transforms.Compose([
        CenterCrop((config.exemplar_size, config.exemplar_size)),
        Normalize(),
        ToTensor()
    ])
    valid_search_transforms = transforms.Compose([Normalize(), ToTensor()])

    # open lmdb
    db = lmdb.open(args.datadir + '.lmdb', readonly=True, map_size=int(50e9))

    # create dataset
    train_dataset = ImagnetVIDDataset(db, train_videos, args.datadir,
                                      train_reference_transforms,
                                      train_search_transforms)
    valid_dataset = ImagnetVIDDataset(db,
                                      valid_videos,
                                      args.datadir,
                                      valid_reference_transforms,
                                      valid_search_transforms,
                                      training=False)
    # create dataloader
    print('Loading Train Dataset...')
    trainloader = DataLoader(train_dataset,
                             batch_size=config.train_batch_size,
                             shuffle=True,
                             pin_memory=True,
                             num_workers=config.train_num_workers,
                             drop_last=True)
    print('Loading Validation Dataset...')
    validloader = DataLoader(valid_dataset,
                             batch_size=config.valid_batch_size,
                             shuffle=False,
                             pin_memory=True,
                             num_workers=config.valid_num_workers,
                             drop_last=True)

    print('Initializing SiameseNet with {} Loss...'.format(args.loss.upper()))
    model = siamnet.SiameseNet(loss=args.loss)
    model.features = torch.nn.DataParallel(model.features)
    model.init_weights()
    model = model.cuda()
    print("Available GPUs:", torch.cuda.device_count())
    # BUG FIX: the trailing '\n\n' used to sit OUTSIDE the print() call
    # (a dead tuple expression that was never printed); it now is an
    # argument so the intended blank lines appear.
    print("Model running on GPU:", next(model.parameters()).is_cuda, '\n\n')
    cudnn.benchmark = True

    optimizer = torch.optim.SGD(model.parameters(),
                                lr=config.lr,
                                momentum=config.momentum,
                                weight_decay=config.weight_decay)

    scheduler = StepLR(optimizer,
                       step_size=config.step_size,
                       gamma=config.gamma)

    for epoch in range(config.start_epoch, config.epoch):

        # train() enables train-mode behaviour for layers such as
        # dropout and batchnorm.
        training_loss = []
        model.train()

        for i, data in enumerate(tqdm(trainloader)):

            reference_imgs, search_imgs = data
            reference_var = Variable(reference_imgs).cuda()
            search_var = Variable(search_imgs).cuda()

            # reset accumulated gradients before each backward pass
            optimizer.zero_grad()

            outputs = model(reference_var, search_var)
            loss = model.compute_loss(outputs)
            loss.backward()
            optimizer.step()

            step = epoch * len(trainloader) + i
            writer.add_scalars('Loss', {'Training': loss.data}, step)

            training_loss.append(loss.data)

        training_loss = torch.mean(torch.stack(training_loss)).item()
        valid_loss = []
        model.eval()

        for i, data in enumerate(tqdm(validloader)):

            reference_imgs, search_imgs = data
            reference_var = Variable(reference_imgs.cuda())
            search_var = Variable(search_imgs.cuda())
            outputs = model(reference_var, search_var)
            loss = model.compute_loss(outputs)
            valid_loss.append(loss.data)

        valid_loss = torch.mean(torch.stack(valid_loss)).item()

        print("EPOCH %d Training Loss: %.4f, Validation Loss: %.4f" %
              (epoch, training_loss, valid_loss))

        # checkpoint on CPU so the file loads without a GPU, then move
        # the model back for the next epoch
        torch.save(model.cpu().state_dict(),
                   models_dir + "siamfc_{}.pth".format(epoch + 1))
        writer.add_scalars('Loss', {'Validation': valid_loss},
                           (epoch + 1) * len(trainloader))

        model.cuda()
        scheduler.step()

    time = datetime.now().strftime('%d-%m-%Y-%H:%M:%S')
    print('\n================= EXPERIMENT END TIME', time,
          '=================\n')

    # Copy slurm output files to experiment folder (best-effort)
    for file in output_files:
        try:
            shutil.copyfile(file, new_exp_dir + file.split(cwd)[1])
        except Exception as error:
            print('Error copying file {} to experiment folder: {}'.format(
                file, error))
Пример #7
0
def main():
    """Train TrackerSiamRPN with two-stream (RGB + infrared) inputs.

    Builds paired RGB/IR train and validation loaders, optionally
    restores weights from a checkpoint or a pretrained backbone, then
    alternates train / validation epochs while logging running losses,
    plotting the validation curve and saving the model each epoch.
    """
    '''parameter initialization'''
    args = parser.parse_args()
    exp_name_dir = util.experiment_name_dir(args.experiment_name)
    '''model on gpu'''
    model = TrackerSiamRPN()
    '''setup train data loader'''
    name = 'GOT-10k'
    assert name in ['VID', 'GOT-10k', 'All', 'RGBT-234']
    if name == 'GOT-10k':
        root_dir_RGBT234 = args.train_path
        root_dir_GTOT = '/home/krautsct/Grayscale-Thermal-Dataset'
        seq_dataset_rgb = GOT10k(root_dir_RGBT234, subset='train_i')
        seq_dataset_i = GOT10k(root_dir_RGBT234,
                               subset='train_i',
                               visible=False)
    elif name == 'VID':
        root_dir = '/home/arbi/desktop/ILSVRC'
        seq_dataset = ImageNetVID(root_dir, subset=('train'))
    elif name == 'All':
        root_dir_vid = '/home/arbi/desktop/ILSVRC'
        seq_datasetVID = ImageNetVID(root_dir_vid, subset=('train'))
        root_dir_got = args.train_path
        seq_datasetGOT = GOT10k(root_dir_got, subset='train')
        seq_dataset = util.data_split(seq_datasetVID, seq_datasetGOT)
    elif name == 'RGBT-234':
        root_dir = args.train_path
        seq_dataset = RGBTSequence(root_dir, subset='train')
        seq_dataset_val = RGBTSequence(root_dir, subset='val')
    print('seq_dataset', len(seq_dataset_rgb))

    train_z_transforms = transforms.Compose([ToTensor()])
    train_x_transforms = transforms.Compose([ToTensor()])

    train_data_ir = TrainDataLoader_ir(seq_dataset_i, train_z_transforms,
                                       train_x_transforms, name)
    anchors = train_data_ir.anchors
    train_loader_ir = DataLoader(dataset=train_data_ir,
                                 batch_size=config.train_batch_size,
                                 shuffle=True,
                                 num_workers=config.train_num_workers,
                                 pin_memory=True)
    train_data_rgb = TrainDataLoader(seq_dataset_rgb, train_z_transforms,
                                     train_x_transforms, name)
    anchors = train_data_rgb.anchors
    train_loader_rgb = DataLoader(dataset=train_data_rgb,
                                  batch_size=config.train_batch_size,
                                  shuffle=True,
                                  num_workers=config.train_num_workers,
                                  pin_memory=True)
    '''setup val data loader'''
    name = 'GOT-10k'
    assert name in ['VID', 'GOT-10k', 'All', 'RGBT-234']
    if name == 'GOT-10k':
        val_dir = '/home/krautsct/RGB-t-Val'
        seq_dataset_val_rgb = GOT10k(val_dir, subset='train_i')
        seq_dataset_val_ir = GOT10k(val_dir, subset='train_i', visible=False)
    elif name == 'VID':
        root_dir = '/home/arbi/desktop/ILSVRC'
        seq_dataset_val = ImageNetVID(root_dir, subset=('val'))
    elif name == 'All':
        root_dir_vid = '/home/arbi/desktop/ILSVRC'
        seq_datasetVID = ImageNetVID(root_dir_vid, subset=('val'))
        root_dir_got = args.train_path
        seq_datasetGOT = GOT10k(root_dir_got, subset='val')
        seq_dataset_val = util.data_split(seq_datasetVID, seq_datasetGOT)
    print('seq_dataset_val', len(seq_dataset_val_rgb))

    valid_z_transforms = transforms.Compose([ToTensor()])
    valid_x_transforms = transforms.Compose([ToTensor()])

    val_data = TrainDataLoader_ir(seq_dataset_val_ir, valid_z_transforms,
                                  valid_x_transforms, name)
    val_loader_ir = DataLoader(dataset=val_data,
                               batch_size=config.valid_batch_size,
                               shuffle=False,
                               num_workers=config.valid_num_workers,
                               pin_memory=True)
    val_data_rgb = TrainDataLoader(seq_dataset_val_rgb, valid_z_transforms,
                                   valid_x_transforms, name)
    val_loader_rgb = DataLoader(dataset=val_data_rgb,
                                batch_size=config.valid_batch_size,
                                shuffle=False,
                                num_workers=config.valid_num_workers,
                                pin_memory=True)

    val_losslist = []
    '''load weights'''
    if args.checkpoint_path is not None:
        assert os.path.isfile(
            args.checkpoint_path), '{} is not valid checkpoint_path'.format(
                args.checkpoint_path)
        # Load the checkpoint once and reuse it; the original called
        # torch.load() again for a state_dict it already held in memory.
        checkpoint = torch.load(args.checkpoint_path, map_location='cpu')
        if 'model' in checkpoint:
            model.net.load_state_dict(checkpoint['model'])
        else:
            model.net.load_state_dict(checkpoint)
        torch.cuda.empty_cache()
        print('You are loading the model.load_state_dict')

    elif config.pretrained_model:
        checkpoint = torch.load(config.pretrained_model)
        # rename backbone keys to match this model, then merge into the
        # current state_dict so missing keys keep their init values
        checkpoint = {
            k.replace('features.features', 'featureExtract'): v
            for k, v in checkpoint.items()
        }
        model_dict = model.net.state_dict()
        model_dict.update(checkpoint)
        model.net.load_state_dict(model_dict)
    '''train phase'''
    train_closses, train_rlosses, train_tlosses = (AverageMeter(),
                                                   AverageMeter(),
                                                   AverageMeter())
    val_closses, val_rlosses, val_tlosses = (AverageMeter(),
                                             AverageMeter(),
                                             AverageMeter())

    val_plot = SavePlotVal(exp_name_dir, 'val_plot')
    for epoch in range(config.epoches):
        model.net.train()
        if config.fix_former_3_layers:
            util.freeze_layers(model.net)
        print('Train epoch {}/{}'.format(epoch + 1, config.epoches))
        train_loss = []
        with tqdm(total=config.train_epoch_size) as progbar:
            for i, (dataset_rgb, dataset_ir) in enumerate(
                    zip(train_loader_rgb, train_loader_ir)):

                # NOTE(review): `epoch` is passed twice here — verify
                # against model.step's signature.
                closs, rloss, loss = model.step(epoch,
                                                dataset_rgb,
                                                dataset_ir,
                                                anchors,
                                                epoch,
                                                i,
                                                train=True)

                closs_ = closs.cpu().item()

                # abort the run on divergence
                if np.isnan(closs_):
                    sys.exit(0)

                train_closses.update(closs.cpu().item())
                train_rlosses.update(rloss.cpu().item())
                train_tlosses.update(loss.cpu().item())

                progbar.set_postfix(closs='{:05.3f}'.format(train_closses.avg),
                                    rloss='{:05.5f}'.format(train_rlosses.avg),
                                    tloss='{:05.3f}'.format(train_tlosses.avg))

                progbar.update()
                train_loss.append(train_tlosses.avg)

                if i >= config.train_epoch_size - 1:
                    '''save model'''
                    model.save(model, exp_name_dir, epoch)

                    break

        train_loss = np.mean(train_loss)
        '''val phase'''
        val_loss = []
        with tqdm(total=config.val_epoch_size) as progbar:
            print('Val epoch {}/{}'.format(epoch + 1, config.epoches))
            for i, (dataset_rgb,
                    dataset_ir) in enumerate(zip(val_loader_rgb,
                                                 val_loader_ir)):

                val_closs, val_rloss, val_tloss = model.step(epoch,
                                                             dataset_rgb,
                                                             dataset_ir,
                                                             anchors,
                                                             epoch,
                                                             train=False)

                closs_ = val_closs.cpu().item()

                if np.isnan(closs_):
                    sys.exit(0)

                val_closses.update(val_closs.cpu().item())
                val_rlosses.update(val_rloss.cpu().item())
                val_tlosses.update(val_tloss.cpu().item())

                progbar.set_postfix(closs='{:05.3f}'.format(val_closses.avg),
                                    rloss='{:05.5f}'.format(val_rlosses.avg),
                                    tloss='{:05.3f}'.format(val_tlosses.avg))

                progbar.update()

                val_loss.append(val_tlosses.avg)

                if i >= config.val_epoch_size - 1:
                    break

        val_loss = np.mean(val_loss)
        val_plot.update(val_loss)
        val_losslist.append(val_loss)
        print('Train loss: {}, val loss: {}'.format(train_loss, val_loss))
        record_path = os.path.dirname(exp_name_dir)
        if not os.path.isdir(record_path):
            os.makedirs(record_path)
        record_file = os.path.join(exp_name_dir, 'val_losses.txt')
        np.savetxt(record_file, val_losslist, fmt='%.3f', delimiter=',')
Пример #8
0
        sunset_model = torch.nn.DataParallel(sunset_model)

    sunrise_model.cuda()
    sunset_model.cuda()

sunrise_model.eval()
sunset_model.eval()

data = WebcamData()
days = data.days
if constants.CENTER:
    test_transformations = torchvision.transforms.Compose(
        [Resize(),
         RandomPatch(constants.PATCH_SIZE),
         Center(),
         ToTensor()])
else:
    test_transformations = torchvision.transforms.Compose(
        [Resize(), RandomPatch(constants.PATCH_SIZE),
         ToTensor()])
test_dataset = Test(data, test_transformations)

#test_dataset.set_mode('sunrise')

if torch.cuda.is_available():
    pin_memory = True
else:
    print('WARNING - Not using GPU.')
    pin_memory = False

test_loader = torch.utils.data.DataLoader(
Пример #9
0
def main():
    """Train a SiamRPN tracker on pre-cropped GOT-10k pairs.

    Uses a log-spaced per-epoch learning-rate schedule, validates after
    every epoch and writes a CPU checkpoint per epoch.
    """

    train_z_transforms = transforms.Compose([
        # RandomStretch(),
        # CenterCrop((config.exemplar_size, config.exemplar_size)),
        ToTensor()
    ])
    train_x_transforms = transforms.Compose([
        # RandomStretch(),
        # RandomCrop((config.instance_size, config.instance_size),
        #            config.max_translate),
        # ColorAug(config.color_ratio),
        ToTensor()
    ])
    val_z_transforms = transforms.Compose([
        # CenterCrop((config.exemplar_size, config.exemplar_size)),
        ToTensor()
    ])
    val_x_transforms = transforms.Compose([
        ToTensor()
    ])

    # spatial side length of the RPN score map for this
    # exemplar/instance size combination
    score_size = int((config.instance_size - config.exemplar_size) / config.total_stride + 1)

    # anchor boxes tiled over every score-map location
    anchors = generate_anchors(config.total_stride, config.anchor_base_size, config.anchor_scales,
                                    config.anchor_ratios,
                                    score_size)
    # create dataset
    train_dataset = GOT_10KDataset(train_data_dir, train_z_transforms, train_x_transforms, anchors)
    valid_dataset = GOT_10KDataset(val_data_dir, val_z_transforms, val_x_transforms, anchors)

    trainloader = DataLoader(train_dataset, batch_size=config.train_batch_size,
                             shuffle=True, pin_memory=True, num_workers=config.train_num_workers, drop_last=True)
    validloader = DataLoader(valid_dataset, batch_size=config.valid_batch_size,
                             shuffle=False, pin_memory=True, num_workers=config.valid_num_workers, drop_last=True)
    # create summary writer
    # NOTE(review): os.mkdir assumes log_dir's parent exists —
    # os.makedirs would be safer; confirm before changing.
    if not os.path.exists(config.log_dir):
        os.mkdir(config.log_dir)
    summary_writer = SummaryWriter(config.log_dir)

    # start training
    with torch.cuda.device(config.gpu_id):
        model = SiamRPN()
        model.load_pretrain(pretrain_model_dir)
        model.freeze_layers()
        model = model.cuda()
        optimizer = torch.optim.SGD(model.parameters(), lr=config.lr,
                                    momentum=config.momentum, weight_decay=config.weight_decay)
        # schdeuler = StepLR(optimizer, step_size=config.step_size, gamma=config.gamma)

        # one learning-rate value per epoch, log-spaced from lr to end_lr
        scheduler = np.logspace(math.log10(config.lr), math.log10(config.end_lr), config.epoch)


        for epoch in range(config.epoch):
            train_loss = []
            model.train()
            # apply this epoch's learning rate to every param group
            curlr = scheduler[epoch]
            for param_group in optimizer.param_groups:
                param_group['lr'] = curlr
            for i, data in enumerate(tqdm(trainloader)):
                z, x, reg_label, cls_label = data
                z, x = Variable(z.cuda()), Variable(x.cuda())
                reg_label, cls_label = Variable(reg_label.cuda()), Variable(cls_label.cuda())
                pred_cls, pred_reg = model(z, x)
                optimizer.zero_grad()
                # reshape/permute predictions to (batch, anchors*H*W, C)
                # as expected by the balanced RPN losses below
                pred_cls = pred_cls.reshape(-1, 1, config.anchor_num * score_size * score_size).permute(0,2,1)
                pred_reg = pred_reg.reshape(-1, 4, config.anchor_num * score_size * score_size).permute(0,2,1)
                cls_loss = rpn_cross_entropy_balance(pred_cls, cls_label, config.num_pos, config.num_neg)
                reg_loss = rpn_smoothL1(pred_reg, reg_label, cls_label, config.num_pos)
                loss = cls_loss + config.lamb * reg_loss
                loss.backward()
                # gradient clipping to stabilise RPN training
                torch.nn.utils.clip_grad_norm_(model.parameters(), config.clip)
                optimizer.step()
                step = epoch * len(trainloader) + i
                summary_writer.add_scalar('train/loss', loss.data, step)
                train_loss.append(loss.data.cpu().numpy())
            train_loss = np.mean(train_loss)
            valid_loss = []
            model.eval()
            for i, data in enumerate(tqdm(validloader)):
                z, x, reg_label, cls_label = data
                z, x = Variable(z.cuda()), Variable(x.cuda())
                reg_label, cls_label = Variable(reg_label.cuda()), Variable(cls_label.cuda())
                pred_cls, pred_reg = model(z, x)
                pred_cls = pred_cls.reshape(-1, 1, config.anchor_num * score_size * score_size).permute(0, 2, 1)
                pred_reg = pred_reg.reshape(-1, 4, config.anchor_num * score_size * score_size).permute(0, 2, 1)
                cls_loss = rpn_cross_entropy_balance(pred_cls, cls_label, config.num_pos, config.num_neg)
                reg_loss = rpn_smoothL1(pred_reg, reg_label, cls_label, config.num_pos)
                loss = cls_loss + config.lamb * reg_loss
                valid_loss.append(loss.data.cpu().numpy())
            valid_loss = np.mean(valid_loss)
            print("EPOCH %d valid_loss: %.4f, train_loss: %.4f, learning_rate: %.4f" %
                  (epoch, valid_loss, train_loss, optimizer.param_groups[0]["lr"]))
            summary_writer.add_scalar('valid/loss',
                                      valid_loss, epoch + 1)
            # checkpoint on CPU so the file loads without CUDA,
            # then move the model back for the next epoch
            torch.save(model.cpu().state_dict(),
                       "./models/siamrpn_{}.pth".format(epoch + 1))
            model.cuda()
Пример #10
0
import sys
import lab_distribution
from custom_transforms import RGB2LAB, ToTensor
from network import Net
from plot import *
from logger import Logger
from tensorboardX import SummaryWriter
from torch.autograd import Variable

#data_dir = '/home/perrito/kth/DD2424/project/images/stl10_binary/train_X.bin'
data_dir = '../data/stl10/data/stl10_binary/train_X.bin'
# Lab colour-bin statistics computed from the training data
# (see lab_distribution for what each key holds)
ab_bins_dict = lab_distribution.get_ab_bins_from_data(data_dir)
ab_bins, a_bins, b_bins = ab_bins_dict['ab_bins'], ab_bins_dict[
    'a_bins'], ab_bins_dict['b_bins']

# every input image is converted RGB -> Lab, then to a tensor
transform = transforms.Compose([RGB2LAB(ab_bins), ToTensor()])

trainset = torchvision.datasets.ImageFolder(root='tmp_red_bird',
                                            transform=transform)
trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=1,
                                          shuffle=False,
                                          num_workers=2)

cnn = Net(ab_bins_dict)
# NOTE(review): rarity weights presumably re-balance rare colours in the
# loss — confirm against Net.get_rarity_weights before relying on this.
cnn.get_rarity_weights(data_dir)
criterion = cnn.loss
optimizer = optim.Adam(cnn.parameters(), weight_decay=.001)
# optimizer = optim.SGD(cnn.parameters(), lr=1e-2, momentum=0.9)
logger = Logger('./log')
logger.add_graph(cnn, image_size=96)
Пример #11
0
def main():
    """Train TrackerSiamRPN on GOT-10k (or ImageNet-VID) sequences.

    Builds train/val loaders, optionally restores weights from a
    checkpoint or a pretrained backbone, then runs the epoch loop,
    saving the model after each training epoch and plotting both losses.
    """
    '''parameter initialization'''
    args = parser.parse_args()
    exp_name_dir = util.experiment_name_dir(args.experiment_name)
    '''model on gpu'''
    model = TrackerSiamRPN()
    '''setup train data loader'''
    name = 'GOT-10k'
    assert name in ['VID', 'GOT-10k', 'All']
    if name == 'GOT-10k':
        root_dir = args.train_path
        seq_dataset = GOT10k(root_dir, subset='train')
    elif name == 'VID':
        root_dir = '../data/ILSVRC'
        seq_dataset = ImageNetVID(root_dir, subset=('train'))
    elif name == 'All':
        root_dir_vid = '../data/ILSVRC'
        seq_datasetVID = ImageNetVID(root_dir_vid, subset=('train'))
        root_dir_got = args.train_path
        seq_datasetGOT = GOT10k(root_dir_got, subset='train')
        seq_dataset = util.data_split(seq_datasetVID, seq_datasetGOT)
    print('seq_dataset', len(seq_dataset))

    train_z_transforms = transforms.Compose([ToTensor()])
    train_x_transforms = transforms.Compose([ToTensor()])

    train_data = TrainDataLoader(seq_dataset, train_z_transforms,
                                 train_x_transforms, name)
    anchors = train_data.anchors
    train_loader = DataLoader(dataset=train_data,
                              batch_size=config.train_batch_size,
                              shuffle=True,
                              num_workers=config.train_num_workers,
                              pin_memory=True)
    '''setup val data loader'''
    name = 'GOT-10k'
    assert name in ['VID', 'GOT-10k', 'All']
    if name == 'GOT-10k':
        root_dir = args.train_path
        seq_dataset_val = GOT10k(root_dir, subset='val')
    elif name == 'VID':
        root_dir = '../data/ILSVRC'
        seq_dataset_val = ImageNetVID(root_dir, subset=('val'))
    elif name == 'All':
        root_dir_vid = '../data/ILSVRC'
        seq_datasetVID = ImageNetVID(root_dir_vid, subset=('val'))
        root_dir_got = args.train_path
        seq_datasetGOT = GOT10k(root_dir_got, subset='val')
        seq_dataset_val = util.data_split(seq_datasetVID, seq_datasetGOT)
    print('seq_dataset_val', len(seq_dataset_val))

    valid_z_transforms = transforms.Compose([ToTensor()])
    valid_x_transforms = transforms.Compose([ToTensor()])

    val_data = TrainDataLoader(seq_dataset_val, valid_z_transforms,
                               valid_x_transforms, name)
    val_loader = DataLoader(dataset=val_data,
                            batch_size=config.valid_batch_size,
                            shuffle=False,
                            num_workers=config.valid_num_workers,
                            pin_memory=True)
    '''load weights'''
    if args.checkpoint_path is not None:
        assert os.path.isfile(
            args.checkpoint_path), '{} is not valid checkpoint_path'.format(
                args.checkpoint_path)
        # Load the checkpoint once and reuse it; the original called
        # torch.load() again for a state_dict it already held in memory.
        checkpoint = torch.load(args.checkpoint_path, map_location='cpu')
        if 'model' in checkpoint:
            model.net.load_state_dict(checkpoint['model'])
        else:
            model.net.load_state_dict(checkpoint)
        torch.cuda.empty_cache()
        print('You are loading the model.load_state_dict')

    elif config.pretrained_model:
        checkpoint = torch.load(config.pretrained_model)
        # rename backbone keys to match this model, then merge into the
        # current state_dict so missing keys keep their init values
        checkpoint = {
            k.replace('features.features', 'featureExtract'): v
            for k, v in checkpoint.items()
        }
        model_dict = model.net.state_dict()
        model_dict.update(checkpoint)
        model.net.load_state_dict(model_dict)
        print('You are loading the pretrained model')
    '''train phase'''
    train_closses, train_rlosses, train_tlosses = (AverageMeter(),
                                                   AverageMeter(),
                                                   AverageMeter())
    val_closses, val_rlosses, val_tlosses = (AverageMeter(),
                                             AverageMeter(),
                                             AverageMeter())

    train_val_plot = SavePlot(exp_name_dir, 'train_val_plot')
    # resume the learning-rate schedule at the starting epoch
    model.adjust_lr(args.epoch_i)

    for epoch in range(args.epoch_i, config.epoches):
        model.net.train()
        if config.fix_former_3_layers:
            util.freeze_layers(model.net)
        print('Train epoch {}/{}'.format(epoch + 1, config.epoches))
        train_loss = []
        with tqdm(total=config.train_epoch_size) as progbar:
            for i, dataset in enumerate(train_loader):

                closs, rloss, loss = model.step(epoch,
                                                dataset,
                                                anchors,
                                                i,
                                                train=True)

                closs_ = closs.cpu().item()

                # abort the run on divergence
                if np.isnan(closs_):
                    sys.exit(0)

                train_closses.update(closs.cpu().item())
                train_rlosses.update(rloss.cpu().item())
                train_tlosses.update(loss.cpu().item())

                progbar.set_postfix(closs='{:05.3f}'.format(train_closses.avg),
                                    rloss='{:05.5f}'.format(train_rlosses.avg),
                                    tloss='{:05.3f}'.format(train_tlosses.avg))

                progbar.update()
                train_loss.append(train_tlosses.avg)

                if i >= config.train_epoch_size - 1:
                    '''save model'''
                    model.save(model, exp_name_dir, epoch)

                    break

        train_loss = np.mean(train_loss)
        '''val phase'''
        val_loss = []
        with tqdm(total=config.val_epoch_size) as progbar:
            print('Val epoch {}/{}'.format(epoch + 1, config.epoches))
            for i, dataset in enumerate(val_loader):

                val_closs, val_rloss, val_tloss = model.step(epoch,
                                                             dataset,
                                                             anchors,
                                                             train=False)

                closs_ = val_closs.cpu().item()

                if np.isnan(closs_):
                    sys.exit(0)

                val_closses.update(val_closs.cpu().item())
                val_rlosses.update(val_rloss.cpu().item())
                val_tlosses.update(val_tloss.cpu().item())

                progbar.set_postfix(closs='{:05.3f}'.format(val_closses.avg),
                                    rloss='{:05.5f}'.format(val_rlosses.avg),
                                    tloss='{:05.3f}'.format(val_tlosses.avg))

                progbar.update()

                val_loss.append(val_tlosses.avg)

                if i >= config.val_epoch_size - 1:
                    break

        val_loss = np.mean(val_loss)
        train_val_plot.update(train_loss, val_loss)
        print('Train loss: {}, val loss: {}'.format(train_loss, val_loss))
Пример #12
0
def main():
    """Train a SiamFC network on the GOT-10k dataset.

    Builds the train/validation datasets with their augmentation
    pipelines, then runs an SGD training loop with a StepLR schedule,
    logging losses to TensorBoard and checkpointing the model after
    every epoch.

    Relies on module-level names: ``config``, ``train_data_dir``,
    ``val_data_dir``, ``GOT_10KDataset``, ``SiamFCNet`` and the
    transform classes.
    """
    # Exemplar (z) and search-region (x) augmentations for training.
    train_z_transforms = transforms.Compose([
        RandomStretch(),
        CenterCrop((config.exemplar_size, config.exemplar_size)),
        ToTensor()
    ])
    train_x_transforms = transforms.Compose([
        RandomStretch(),
        RandomCrop((config.instance_size, config.instance_size),
                   config.max_translate),
        ToTensor()
    ])
    # Validation uses deterministic transforms only (no random crops).
    val_z_transforms = transforms.Compose([
        CenterCrop((config.exemplar_size, config.exemplar_size)),
        ToTensor()
    ])
    val_x_transforms = transforms.Compose([
        ToTensor()
    ])

    # create datasets
    train_dataset = GOT_10KDataset(train_data_dir, train_z_transforms, train_x_transforms)
    valid_dataset = GOT_10KDataset(val_data_dir, val_z_transforms, val_x_transforms, training=False)

    trainloader = DataLoader(train_dataset, batch_size=config.train_batch_size,
                             shuffle=True, pin_memory=True, num_workers=config.train_num_workers, drop_last=True)
    validloader = DataLoader(valid_dataset, batch_size=config.valid_batch_size,
                             shuffle=False, pin_memory=True, num_workers=config.valid_num_workers, drop_last=True)

    # create summary writer; makedirs handles nested paths and an
    # already-existing directory (os.mkdir would raise in both cases)
    os.makedirs(config.log_dir, exist_ok=True)
    summary_writer = SummaryWriter(config.log_dir)
    # checkpoints are written here every epoch; make sure it exists
    os.makedirs('./models', exist_ok=True)

    # start training
    with torch.cuda.device(config.gpu_id):
        model = SiamFCNet()
        model.init_weights()
        # model.load_state_dict(torch.load('./models/siamfc_30.pth'))
        model = model.cuda()
        optimizer = torch.optim.SGD(model.parameters(), lr=config.lr,
                                    momentum=config.momentum, weight_decay=config.weight_decay)
        # optimizer = torch.optim.Adam(model.parameters(), lr=config.lr)
        scheduler = StepLR(optimizer, step_size=config.step_size,
                           gamma=config.gamma)

        for epoch in range(config.epoch):
            train_loss = []
            model.train()
            for i, data in enumerate(tqdm(trainloader)):
                z, x = data
                # Variable() is a no-op since PyTorch 0.4; .cuda() suffices.
                z, x = z.cuda(), x.cuda()
                outputs = model(z, x)
                optimizer.zero_grad()
                loss = model.loss(outputs)
                loss.backward()
                optimizer.step()
                step = epoch * len(trainloader) + i
                # .item() detaches and converts the 0-dim loss tensor;
                # .data is deprecated and keeps a tensor alive needlessly.
                summary_writer.add_scalar('train/loss', loss.item(), step)
                train_loss.append(loss.item())
            train_loss = np.mean(train_loss)

            valid_loss = []
            model.eval()
            # no_grad: validation needs no autograd graph (saves memory/time)
            with torch.no_grad():
                for i, data in enumerate(tqdm(validloader)):
                    z, x = data
                    z, x = z.cuda(), x.cuda()
                    outputs = model(z, x)
                    loss = model.loss(outputs)
                    valid_loss.append(loss.item())
            valid_loss = np.mean(valid_loss)

            print("EPOCH %d valid_loss: %.4f, train_loss: %.4f, learning_rate: %.4f" %
                  (epoch, valid_loss, train_loss, optimizer.param_groups[0]["lr"]))
            summary_writer.add_scalar('valid/loss',
                                      valid_loss, epoch + 1)
            # move to CPU so the checkpoint loads on CPU-only machines,
            # then move back before the next epoch
            torch.save(model.cpu().state_dict(),
                       "./models/siamfc_{}.pth".format(epoch + 1))
            model.cuda()
            scheduler.step()

    summary_writer.close()
Пример #13
0
        self._pick_img_pairs(index)
        self.open()

        self._tranform()

        regression_target, conf_target = self._target()
        self.count += 1

        return self.ret['train_z_transforms_rgb'], \
               self.ret['train_x_transforms_rgb'], \
               self.ret['train_z_transforms_ir'], \
               self.ret['train_x_transforms_ir'], \
               regression_target, \
               conf_target.astype(np.int64)

    def __len__(self):
        """Return the dataset length: 64 sample pairs per training step."""
        pairs_per_step = 64
        return pairs_per_step * config.train_epoch_size


if __name__ == "__main__":

    # Smoke test: build the RGB-T pair loader and fetch one sample.
    root_dir = '/home/krautsct/RGB-T234'
    # Two views of the same 'train_i' sequences; visible=False presumably
    # selects the infrared modality — TODO confirm against GOT10k wrapper.
    seq_dataset_rgb = GOT10k(root_dir, subset='train_i')
    seq_dataset_i = GOT10k(root_dir, subset='train_i', visible=False)
    train_z_transforms = transforms.Compose([ToTensor()])
    train_x_transforms = transforms.Compose([ToTensor()])
    train_data = TrainDataLoaderRGBT(seq_dataset_rgb, seq_dataset_i,
                                     train_z_transforms, train_x_transforms)
    # Fetch one arbitrary item (index 180) to exercise the pipeline.
    res = train_data.__getitem__(180)
    print(res)