Example #1
def eval_data2(testloader):
    opt.batch_size = opt.hist

    model, loss, optim_seq, optim_both = joined_model.create_model()
    model_name = ''
    if opt.seq_both_resume:
        model.resume_both(os.path.abspath(opt.seq_both_resume_str))
        model_name = opt.seq_both_resume_str
    else:
        model.resume_sweaty(os.path.abspath(opt.sweaty_resume_str))
        model_name = opt.sweaty_resume_str
        if opt.seq_resume:
            model_name += '_' + opt.seq_resume_str
            model.resume_seq(os.path.abspath(opt.seq_resume_str))

    logger.debug('model name: %s' % model_name)
    dir_check(opt.save_out)
    dir_check(os.path.join(opt.save_out, opt.seq_model))
    model.eval()
    model = model.cuda()
    if opt.seq_model == 'lstm':
        lstm.test(testloader, model, out=True)
    if opt.seq_model == 'tcn':
        # tcn.test(testloader, model)
        tcn_ed.test(testloader, model, out=True)
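A testloader for eval_data2 is a plain DataLoader over the sequence dataset; a minimal sketch, assuming RealBallDataset and the opt fields used elsewhere in these examples:

from arguments import opt
import torch
from torchvision import transforms
from py_dataset.seq_dataset import RealBallDataset

# hypothetical wiring: eval_data2 resets opt.batch_size to opt.hist,
# so the loader batch size below mirrors that choice
testset = RealBallDataset(data_path=opt.seq_real_balls,
                          transform=transforms.Compose([
                              transforms.ToTensor(),
                              transforms.Normalize((0.485, 0.456, 0.406),
                                                   (0.229, 0.224, 0.225))]),
                          small=False)
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=opt.hist,
                                         shuffle=False)
eval_data2(testloader)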
Example #2
 def __init__(self, path, maxlen=20, prediction=10):
     logger.debug('create ball dataset')
     self.balls = {}
     self.balls_coord = {}
     self.balls_frames = []
     self.len = 0
     self.balls_maxframe = {}
     self.maxlen = maxlen
     if opt.seq_model == 'lstm':
         self.prediction = prediction
     if opt.seq_model == 'tcn':
         self.prediction = 1
     files = sorted(list(os.listdir(path)), key=keyf)
     for filename in files:
         if not filename.endswith('.npy'):
             continue
         search = re.search(r'ball(\d*)_(\d*).npy', filename)
         n_ball = int(search.group(1))
         n_frame = int(search.group(2))
         feature_map = np.load(join(path, filename))[..., None]
         features = self.balls.get(n_ball, None)
         features = join_data(features, feature_map, np.concatenate, axis=2)
         self.balls[n_ball] = features
     self.h = features.shape[0]
     self.w = features.shape[1]
     for ball_idx, data in self.balls.items():
         for n_frame in range(1, data.shape[-1] - self.prediction):
             self.balls_frames.append([ball_idx, n_frame])
         self.balls_coord[ball_idx] = np.loadtxt(
             os.path.join(path, 'ball%d.txt' % ball_idx))
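The index built above stores one [ball_idx, n_frame] pair per usable frame; companion __len__/__getitem__ methods would plausibly look like the following hypothetical sketch (the project's real slicing logic may differ):

 def __len__(self):
     # one sample per (ball, frame) pair collected in __init__
     return len(self.balls_frames)

 def __getitem__(self, idx):
     # hypothetical: history of up to maxlen past maps plus the map
     # self.prediction frames ahead as the target
     ball_idx, n_frame = self.balls_frames[idx]
     data = self.balls[ball_idx]
     start = max(0, n_frame - self.maxlen)
     history = data[..., start:n_frame]
     target = data[..., n_frame + self.prediction - 1]
     return history, target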
Example #3
 def wrap(*args, **kwargs):
     time1 = time.time()
     ret = f(*args, **kwargs)
     time2 = time.time()
     logger.debug('%s took %0.3f ms ~ %0.3f min ~ %0.3f sec' %
                  (f, (time2 - time1) * 1000.0, (time2 - time1) / 60.0,
                   (time2 - time1)))
     return ret
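wrap above is the inner closure of a timing decorator; a minimal enclosing decorator, with the name timing assumed, would be:

import time
from functools import wraps

from py_utils.logging_setup import logger

def timing(f):
    # decorator: log how long a single call to f takes
    @wraps(f)
    def wrap(*args, **kwargs):
        time1 = time.time()
        ret = f(*args, **kwargs)
        time2 = time.time()
        logger.debug('%s took %0.3f ms ~ %0.3f min ~ %0.3f sec' %
                     (f, (time2 - time1) * 1000.0, (time2 - time1) / 60.0,
                      (time2 - time1)))
        return ret
    return wrap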
Example #4
 def evaluator(self, trainloader, testloader, print_every=1000, both=0):
     '''
     train and validate model
     '''
     resume_epoch = 0
     logger.debug('Model')
     self.model.off_sweaty()
     for epoch in range(resume_epoch, self.epochs):
         if epoch == both:
             self.both = True
             self.model.on_sweaty()
         self.test(epoch, testloader)
         self.train(epoch, trainloader, print_every=print_every)
         logger.debug('threshold: %s' % str(self.threshold))
         if epoch % opt.save_every == 0:
             save_model = {
                 'threshold': self.threshold,
                 'epoch': epoch,
                 'state_dict_model': self.model.state_dict()
             }
             model_name = '{}_lr_{}_opt_{}_epoch_{}'.format(
                 opt.seq_save_model, self.lr, self.optim, epoch)
             model_dir = opt.model_root + '/' + model_name
             torch.save(save_model, model_dir)
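The checkpoints written above keep the detection threshold next to the weights; restoring one is a torch.load plus load_state_dict (the file name below is illustrative, not taken from the project):

from arguments import opt
import torch
from py_models import joined_model

# hypothetical restore of a checkpoint produced by the loop above
model, loss, optim_seq, optim_both = joined_model.create_model()
checkpoint = torch.load(opt.model_root + '/' + 'seq_lr_0.001_opt_adam_epoch_10',
                        map_location='cpu')
model.load_state_dict(checkpoint['state_dict_model'])
threshold = checkpoint['threshold']
resume_epoch = checkpoint['epoch'] + 1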
Example #5
def eval_data1(testloader):
    opt.batch_size = opt.hist

    model, loss, optim_seq, optim_both = joined_model.create_model()
    model_name = ''
    if opt.seq_both_resume:
        model.resume_both(os.path.abspath(opt.seq_both_resume_str))
        model_name = opt.seq_both_resume_str
    else:
        model.resume_sweaty(os.path.abspath(opt.sweaty_resume_str))
        model_name = opt.sweaty_resume_str
        if opt.seq_resume:
            model_name += '_' + opt.seq_resume_str
            model.resume_seq(os.path.abspath(opt.seq_resume_str))

    opt.lr = 1e-5
    modeleval = ModelEvaluator(model,
                               threshold=5.0535,
                               min_radius=2.625,
                               optim_seq=optim_seq,
                               optim_both=optim_both,
                               loss=loss)
    logger.debug('start')
    modeleval.test(model_name, testloader)
Example #6
def create_model():
    torch.manual_seed(opt.seed)
    model = LSTM()
    loss = nn.MSELoss(reduction='sum').cuda()
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=opt.lr,
                                 weight_decay=opt.weight_decay)
    logger.debug(str(model))
    logger.debug(str(loss))
    logger.debug(str(optimizer))
    return model, loss, optimizer
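Judging by the train method in Example #7, the LSTM returned here outputs (prediction, (h, c)); a minimal single-step usage sketch of the three objects returned by create_model (the trainloader is assumed):

model, criterion, optimizer = create_model()
model = model.cuda()

for train_data, train_labels in trainloader:  # assumed DataLoader
    train_data = train_data.float().cuda()
    train_labels = train_labels.float().cuda()

    output, (h, c) = model(train_data)
    loss = criterion(output[0], train_labels)

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()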
Example #7
    def train(self, epoch, trainloader, print_every=100):
        '''
        method for training
        '''
        self.model.train()
        self.model = self.model.cuda()
        losses = Averaging()
        loss_batch = 0
        # TP, FP, FN, TN = 0, 0, 0, 0
        for b_idx, (train_data, train_labels) in enumerate(trainloader):
            train_data = train_data.float()
            train_labels = train_labels.float()
            if opt.real_balls:
                train_data = train_data.squeeze()
                train_labels = train_labels.view(-1, opt.seq_predict,
                                                 opt.map_size_x,
                                                 opt.map_size_y)
            if self.use_gpu:
                train_data = train_data.cuda(non_blocking=True)
                train_labels = train_labels.cuda()
            if opt.seq_model == 'lstm':
                output, (h, cc) = self.model(train_data)
                loss = self.loss(output[0], train_labels)
            if opt.seq_model == 'tcn':
                output = self.model(train_data)
                loss = self.loss(output, train_labels)

            losses.update(loss.item(), train_data.size(0))

            self.model.zero_grad()
            loss.backward()
            if self.both:
                self.optim_both.step()
            else:
                self.optim_seq.step()

            if b_idx % opt.print_every == 0:
                # logger.debug('%s | %s' % (str(train_labels), str(output)))
                logger.debug(
                    'Train Epoch: {0} [{1}/{2} ({3:.0f}%)]\t Loss {4:<10.3f} \ {5:>10.3f}'
                    .format(epoch, b_idx * len(train_data),
                            len(trainloader) * len(train_data),
                            100. * b_idx / len(trainloader), loss, losses.avg))

            loss_ = loss.item()
            self.iter_loss_train.append(loss_)
            loss_batch += loss_
        losses.reset()

        loss_batch /= len(trainloader)

        logger.debug('Epoch = {} '.format(epoch))
        logger.debug('Train loss = {0}'.format(loss_batch))
        self.train_loss.append(loss_batch)
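The Averaging object used above only needs update(value, n), avg, and reset(); a minimal running-average helper along those lines (hypothetical, the project's own class may differ):

class Averaging(object):
    '''running average of a scalar, weighted by the number of samples'''

    def __init__(self):
        self.reset()

    def reset(self):
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count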
Example #8
def create_model():
    torch.manual_seed(opt.seed)
    n_nodes = [64, 96]
    model = TCN_ED(n_nodes, opt.hist, opt.seq_predict,
                   opt.ksize).to(opt.device)
    model.apply(init_weights)

    # loss = nn.MSELoss(reduction='sum')
    loss = nn.MSELoss()

    optimizer = torch.optim.Adam(
        list(model.net.parameters()),
        # list(model.fc.parameters()),
        lr=opt.lr,
        weight_decay=opt.weight_decay)
    logger.debug(str(model))
    logger.debug(str(loss))
    logger.debug(str(optimizer))
    return model, loss, optimizer
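model.apply(init_weights) walks every submodule; the actual init_weights is not shown here, but a common pattern it likely follows is Xavier initialization of the convolutional and linear layers:

import torch.nn as nn

def init_weights(m):
    # assumed initializer: Xavier weights, zero biases for conv/linear layers
    if isinstance(m, (nn.Conv1d, nn.Linear)):
        nn.init.xavier_uniform_(m.weight)
        if m.bias is not None:
            nn.init.zeros_(m.bias)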
Example #9
                    ball_numbers = list(map(lambda x: int(x), line[1:]))
                    ball_center = ball_numbers[:2]
                    ball_resolution = ball_numbers[2:]
                    ball_center[0] = int(ball_center[0] * 120 /
                                         ball_resolution[0])
                    ball_center[1] = int(ball_center[1] * 160 /
                                         ball_resolution[1])
                    self.balls.append([ball_filename, ball_center])

    def __len__(self):
        return len(self.balls)

    def __getitem__(self, idx):
        filename, center = self.balls[idx]
        img = Image.open(os.path.join(opt.data_root, filename))
        # gt_map = np.zeros((120, 160), dtype=float)

        map_size = (120, 160)
        if self.transform:
            img = self.transform(img)

        return img, np.zeros(map_size, dtype=float), center, os.path.join(
            opt.data_root, filename)


if __name__ == '__main__':
    folder = 'toy.seq'
    dataset = BallDataset(folder)
    features, gt = dataset[38]
    logger.debug('done')
Example #10
    def test(self, model, testloader):
        '''
        method for testing
        '''
        self.model.eval()
        TP, FP, FN, TN = 0, 0, 0, 0
        l = []
        with torch.no_grad():
            batch_loss = 0
            for idx, (test_data, test_labels, actual_centers,
                      path) in enumerate(testloader):
                if 'new' in opt.dataset:
                    centers = []
                    for x, y in zip(actual_centers[0], actual_centers[1]):
                        centers.append((x, y))
                    actual_centers = centers
                if self.use_gpu:
                    test_data, test_labels = test_data.cuda(
                    ), test_labels.cuda()
                # output = self.model(test_data)
                output = self.model.test(test_data)
                if len(output.shape) < 3:
                    output = output.unsqueeze(0)
                loss_ = self.loss(output, test_labels.float())
                # output = output.cpu().squeeze()
                output = output.cpu()

                out, predicted_centers, maps_area = post_processing(
                    output.numpy(), self.threshold)
                TP_test, FP_test, TN_test, FN_test = tp_fp_tn_fn_alt(
                    actual_centers, predicted_centers, maps_area,
                    self.min_radius)
                TP += TP_test
                FP += FP_test
                FN += FN_test
                TN += TN_test

                if 'new' in opt.dataset:
                    if opt.batch_size > 1:
                        path = path[0]
                        output = output[0]
                    n = int(re.search(r'frame(\d*)', path).group(1))
                    plt.axis('off')
                    output = output.squeeze().numpy()
                    output[0, 0] = 1
                    img = plt.imshow(output)
                    dir_check(opt.save_out)
                    dir_check(os.path.join(opt.save_out, opt.seq_model))
                    plt.savefig(
                        os.path.join(opt.save_out, opt.seq_model, opt.suffix,
                                     '%d_sweaty_output.png' % n))

                # for p in path:
                #     n = int(re.search(r'frame(\d*)_', p).group(1))
                #     if n == 1145:
                #         name = 'tmp' if opt.seq_both_resume else 'sweaty'
                #         plt.axis('off')
                #         output = output.squeeze().numpy()
                #         output[0,0] = 1
                #         # output = output / np.max(output)
                #         img = plt.imshow(output)
                #         plt.savefig(os.path.join(opt.save_out, opt.seq_model, '%s.png'%name))
                #         plt.axis('off')
                #         # out[0] = out[0] / np.max(out[0])
                #         # plt.colorbar()
                #         out[0][0, 0] = 1
                #         img = plt.imshow(out[0])
                #         plt.savefig(os.path.join(opt.save_out, opt.seq_model, '%s_out.png'%name))

                self.iter_loss_test.append(loss_)
                batch_loss += loss_

            batch_loss /= len(testloader)
            FDR_test, RC_test, accuracy_test = performance_metric(
                TP, FP, FN, TN)

            self.fdr_test.append(FDR_test)
            self.accuracy_test.append(accuracy_test)
            self.RC_test.append(RC_test)

            logger.debug('model {} Test TP {} FP {} TN {} FN {}'.format(
                model, TP, FP, TN, FN))
            logger.debug(
                'Test loss = {0} FDR = {1:.4f}, RC = {2:.4f}, accuracy = {3:.4f}'
                .format(batch_loss, FDR_test, RC_test, accuracy_test))

            self.test_loss.append(batch_loss)
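performance_metric above condenses the confusion counts into the logged scores; assuming the standard definitions of false discovery rate (FDR), recall (RC), and accuracy, it reduces to:

def performance_metric(TP, FP, FN, TN):
    # FDR = FP / (FP + TP), recall = TP / (TP + FN),
    # accuracy = (TP + TN) / all samples; zero-division guarded
    FDR = FP / (FP + TP) if (FP + TP) > 0 else 0.0
    RC = TP / (TP + FN) if (TP + FN) > 0 else 0.0
    total = TP + TN + FP + FN
    accuracy = (TP + TN) / total if total > 0 else 0.0
    return FDR, RC, accuracy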
Example #11
import random

import torch
import torch.backends.cudnn as cudnn
from torchvision import transforms

from arguments import opt
from py_utils.logging_setup import path_logger, logger
from py_dataset.seq_dataset import RealBallDataset
from py_models import joined_model
from py_train.evaluator import ModelEvaluator
from py_dataset.dataset import SoccerDataSet


path_logger()
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
cudnn.benchmark = True

vars_iter = list(vars(opt))
for arg in sorted(vars_iter):
    logger.debug('%s: %s' % (arg, getattr(opt, arg)))


# trainset = BallDataset(opt.seq_dataset)
trainset = RealBallDataset(data_path=opt.seq_real_balls,
                           transform=transforms.Compose([
                               # transforms.RandomResizedCrop(opt.input_size[1]),
                               # transforms.RandomHorizontalFlip(),
                               # transforms.RandomRotation(opt.rot_degree),
                               transforms.ColorJitter(brightness=0.3,
                                                      contrast=0.4, saturation=0.4),
                               transforms.ToTensor(),
                               transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
                           ]),
                           small=False)
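The script presumably goes on to wrap the dataset in loaders and run the evaluator from Example #4; a hedged continuation sketch (loader settings and evaluator arguments are assumptions based on Examples #4 and #5):

trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=opt.batch_size,
                                          shuffle=True)
# a test split would be built the same way from a held-out folder
testloader = torch.utils.data.DataLoader(trainset,
                                         batch_size=opt.batch_size,
                                         shuffle=False)

model, loss, optim_seq, optim_both = joined_model.create_model()
modeleval = ModelEvaluator(model,
                           threshold=5.0535,
                           min_radius=2.625,
                           optim_seq=optim_seq,
                           optim_both=optim_both,
                           loss=loss)
modeleval.evaluator(trainloader, testloader)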