Example 1
def test(model_input, labels, model, loss_fn=None, batch_size=32):
    """

    Args:
        model_input: list of tuples containing input to model
        labels: list of tuples containing labels corresponding to model input for training
        model: (torch.nn.Module) the neural network
        loss_fn: a function that takes batch_output and batch_labels and computes the loss for the batch
        batch_size: maximum batch_size

    Returns:
        metrics: (dict) metrics summed over all batches
    """

    metrics = {}
    for batch_input, batch_labels in zip(grouper(model_input, batch_size),
                                         grouper(labels, batch_size)):
        batch_input = list(
            filter(lambda x: x is not None,
                   batch_input))  # remove None objects introduced by grouper
        batch_labels = list(
            filter(lambda x: x is not None,
                   batch_labels))  # remove None objects introduced by grouper

        batch_metrics = test_batch(batch_input,
                                   batch_labels,
                                   model,
                                   loss_fn=loss_fn)
        add_dict(metrics, batch_metrics)

    return metrics
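
These examples all lean on a few helpers whose definitions are not shown. Below is a minimal sketch of the two used above, assuming `grouper` is the standard itertools batching recipe (it pads the final chunk with None, hence the filters) and `add_dict` sums values key-wise into an accumulator. Some later examples pass nested dicts to `add_dict`, which would need a recursive variant.

from itertools import zip_longest

def grouper(iterable, n, fillvalue=None):
    # itertools recipe: collect data into fixed-length chunks,
    # padding the last chunk with fillvalue (here None).
    args = [iter(iterable)] * n
    return zip_longest(*args, fillvalue=fillvalue)

def add_dict(acc, new):
    # Sum values key-wise into the accumulator, in place.
    for key, value in new.items():
        acc[key] = acc.get(key, 0) + value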
Example 2
    def test_all():
        '''testing'''
        test_loss = {}
        for i, data in enumerate(test_dataloader):
            pred_dict, loss_dict = trainer.test(data)
            loss_dict['cnt'] = 1
            add_dict(test_loss, loss_dict)

        cnt = test_loss.pop('cnt')
        log_loss_summary(test_loss, cnt, lambda x, y: log_string('real_test {} is {}'.format(x, y)))
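
Setting `loss_dict['cnt'] = 1` before each `add_dict` call makes the accumulator count batches as a side effect, so the popped `cnt` equals the number of batches. A plausible sketch of `log_loss_summary` under that convention, averaging each summed loss before handing name/value pairs to the callback:

def log_loss_summary(loss_sum, cnt, log_fn):
    # Report the mean of each accumulated loss over cnt batches.
    for name, total in loss_sum.items():
        log_fn(name, total / cnt)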
Example 3
def floyd_warshall1(graph):
    """solve all point's shortest path
    O(|V|^3)
    get shortest path of all vertex

    Parameters
    ------
    graph: ALGraph

    Returns
    -------
        dict{dict{}}

    example:
    
    >>> a, b, c, d, e = range(1,6) # One-based
    >>> W = {
    >>>     a: {c:1, d:7},
    >>>     b: {a:4},
    >>>     c: {b:-5, e:2},
    >>>     d: {c:6},
    >>>     e: {a:3, b:8, d:-4}
    >>>     }
    >>> for u in W:
    >>>     for v in W:
    >>>         if u == v: W[u][v] = 0
    >>>         if v not in W[u]: W[u][v] = utils.INF
    >>> D = floyd_warshall1(W)
    >>> print [D[a][v] for v in [a, b, c, d, e]] # [0, -4, 1, -1, 3]
    >>> print [D[b][v] for v in [a, b, c, d, e]] # [4, 0, 5, 3, 7]
    >>> print [D[c][v] for v in [a, b, c, d, e]] # [-1, -5, 0, -2, 2]
    >>> print [D[d][v] for v in [a, b, c, d, e]] # [5, 1, 6, 0, 8]
    >>> print [D[e][v] for v in [a, b, c, d, e]] # [1, -3, 2, -4, 0]
    
    """
    distance = copy.deepcopy(graph)
    for k in distance:
        for u in distance:
            for v in distance:
                try:
                    a = distance[u][v]
                except KeyError:
                    a = utils.INF
                try:
                    b = distance[u][k] + distance[k][v]
                except KeyError:
                    b = utils.INF
                else:
                    utils.add_dict(distance, u, v, min(a, b))
    return distance
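
Note that `utils.add_dict` here has a different signature from the loss accumulator above: it writes a value into a nested dict. A one-line sketch matching that call:

def add_dict(d, u, v, value):
    # Set d[u][v] = value, creating the inner dict if needed.
    d.setdefault(u, {})[v] = value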
Example 4
    def save_per_diff(self):
        output_path = self.cfg['experiment_dir']
        timestamp = time.strftime("%m-%d-%H-%M-%S")
        with open(pjoin(output_path, f'{timestamp}.pkl'), 'wb') as f:
            pickle.dump(self.per_diff_dict, f)

        avg_dict = {}
        for inst in self.per_diff_dict:
            add_dict(avg_dict, self.per_diff_dict[inst])
        log_loss_summary(
            avg_dict, len(self.per_diff_dict),
            lambda x, y: print('Test_Real_Avg {} is {}'.format(x, y)))
        per_dict_to_csv(self.per_diff_dict,
                        pjoin(output_path, f'{timestamp}.csv'))
        print(timestamp)
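
`per_dict_to_csv` is not shown either; a sketch assuming each per-instance entry is a flat dict of scalar metrics (nested dicts would need flattening first):

import csv

def per_dict_to_csv(per_dict, path):
    # One row per instance; columns are the union of all metric names.
    fields = sorted({key for metrics in per_dict.values() for key in metrics})
    with open(path, 'w', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=['instance'] + fields)
        writer.writeheader()
        for inst, metrics in per_dict.items():
            writer.writerow({'instance': inst, **metrics})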
Example 5
    def test_all():
        '''testing'''
        test_loss = {}
        for i, data in enumerate(test_dataloader):
            pred_dict, loss_dict = trainer.test(data)
            loss_dict['cnt'] = 1
            add_dict(test_loss, loss_dict)

        cnt = test_loss.pop('cnt')
        log_loss_summary(test_loss, cnt,
                         lambda x, y: log_string('Test {} is {}'.format(x, y)))

        if val_dataloader is not None:
            val_loss = {}
            for i, data in enumerate(val_dataloader):
                pred_dict, loss_dict = trainer.test(data)
                loss_dict['cnt'] = 1
                add_dict(val_loss, loss_dict)

            cnt = val_loss.pop('cnt')
            log_loss_summary(
                val_loss, cnt, lambda x, y: log_string('{} {} is {}'.format(
                    args.use_val, x, y)))
Example 6
def interpret_file(lines, fdict):
    '''
    This function acts like the main loop.  It has side effects; unfortunately this program sorta lends itself to those.  Or it might just be that I'm not a real programmer.  Likely the latter.
    '''
    statedict = {'absolute' : True, 'inches' : True}
    args = {'X':0, 'Y':0, 'Z':0, 'SD' : statedict}
    for l in lines:
        # The 'e' prefix denotes the args and predicate parsed from the expression
        epred, eargs = parse.line(l)
        args['OX'], args['OY'], args['OZ'], args['OC'] = args['X'], args['Y'], args['Z'], [epred, eargs]
        args = utils.add_dict(args, eargs)
        if not statedict['absolute']:
            # If incremental, coordinates are offsets from the previous position
            for c in ['X', 'Y', 'Z']:
                args[c] += args['O' + c]
        if epred in fdict:
            fdict[epred](args)
        else:
            # This is just so I know what to implement
            print(epred)

    return statedict
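
Here `utils.add_dict(args, eargs)` is a third usage of the same name: a merge that returns the updated mapping. A sketch matching that call:

def add_dict(base, new):
    # Merge two dicts, with keys from `new` overriding `base`; returns a copy.
    merged = dict(base)
    merged.update(new)
    return merged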
Example 7
    def compute_loss(self,
                     test=False,
                     per_instance=False,
                     eval_iou=False,
                     test_prefix=None):
        feed_dict = self.feed_dict
        pred_dict = self.pred_dict
        loss_dict = {}

        avg_pose_diff, all_pose_diff = {}, {}
        avg_init_diff, all_init_diff = {}, {}
        avg_iou, all_iou = {}, {}
        avg_seg_loss, all_seg_loss = [], {}
        avg_nocs_loss, all_nocs_loss = [], {}
        gt_corners = feed_dict[0]['meta']['nocs_corners'].float().to(
            self.device)

        for i, pred_pose in enumerate(pred_dict['poses']):

            pose_diff, per_diff = eval_part_full(feed_dict[i]['gt_part'],
                                                 pred_pose,
                                                 per_instance=per_instance,
                                                 yaxis_only=self.sym)

            if i > 0:
                add_dict(avg_pose_diff, pose_diff)
                if per_instance:
                    self.record_per_diff(feed_dict[i], per_diff)

            all_pose_diff[i] = deepcopy(pose_diff)

            if i > 0:
                init_pose_diff, init_per_diff = eval_part_full(
                    feed_dict[i]['gt_part'],
                    pred_dict['poses'][i - 1],
                    per_instance=per_instance,
                    yaxis_only=self.sym)

                add_dict(avg_init_diff, init_pose_diff)
                all_init_diff[i] = deepcopy(init_pose_diff)

            if i > 0:
                if 'labels' in self.npcs_feed_dict[i]:
                    seg_loss = compute_miou_loss(
                        pred_dict['npcs_pred'][i]['seg'],
                        self.npcs_feed_dict[i]['labels'],
                        per_instance=False)
                    avg_seg_loss.append(seg_loss)
                    all_seg_loss[i] = seg_loss

                pred_labels = torch.max(pred_dict['npcs_pred'][i]['seg'],
                                        dim=-2)[1]  # [B, P, N] -> [B, N]

                if 'nocs' in self.npcs_feed_dict[i]:
                    nocs_loss = compute_nocs_loss(
                        pred_dict['npcs_pred'][i]['nocs'],
                        self.npcs_feed_dict[i]['nocs'],
                        labels=pred_labels,
                        confidence=None,
                        loss='l2',
                        self_supervise=False,
                        per_instance=False)
                    avg_nocs_loss.append(nocs_loss)
                    all_nocs_loss[i] = nocs_loss

                pred_nocs = choose_coord_by_label(
                    pred_dict['npcs_pred'][i]['nocs'].transpose(-1, -2),
                    pred_labels)

                if eval_iou:
                    pred_corners = get_pred_nocs_corners(
                        pred_labels, pred_nocs, self.num_parts)
                    pred_corners = torch.tensor(pred_corners).to(
                        self.device).float()

                    def calc_iou(gt_pose, pred_pose):
                        iou, per_iou = eval_single_part_iou(gt_corners,
                                                            pred_corners,
                                                            gt_pose,
                                                            pred_pose,
                                                            separate='both',
                                                            nocs=self.nocs_otf,
                                                            sym=self.sym)

                        return iou, per_iou

                    iou, per_iou = calc_iou(feed_dict[i]['gt_part'], pred_pose)
                    add_dict(avg_iou, iou)
                    if per_instance:
                        self.record_per_diff(feed_dict[i], per_iou)
                    all_iou[i] = deepcopy(iou)

        avg_pose_diff = divide_dict(avg_pose_diff, len(pred_dict['poses']) - 1)
        avg_init_diff = divide_dict(avg_init_diff, len(pred_dict['poses']) - 1)
        loss_dict.update({
            'avg_pred': avg_pose_diff,
            'avg_init': avg_init_diff,
            'frame_pred': all_pose_diff,
            'frame_init': all_init_diff
        })
        if len(avg_seg_loss) > 0:
            avg_seg_loss = torch.mean(torch.stack(avg_seg_loss))
            loss_dict.update({
                'avg_seg': avg_seg_loss,
                'frame_seg': all_seg_loss
            })
        if len(avg_nocs_loss) > 0:
            avg_nocs_loss = torch.mean(torch.stack(avg_nocs_loss))
            loss_dict.update({
                'avg_nocs': avg_nocs_loss,
                'frame_nocs': all_nocs_loss
            })
        if eval_iou:
            avg_iou = divide_dict(avg_iou, len(pred_dict['poses']) - 1)
            loss_dict.update({'avg_iou': avg_iou, 'frame_iou': all_iou})

        self.loss_dict = loss_dict
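
`divide_dict` is the natural counterpart of `add_dict`, turning accumulated sums into means. A minimal sketch assuming scalar values (the nested pose-diff dicts above would need a recursive version):

def divide_dict(d, cnt):
    # Divide every accumulated value by the sample count.
    return {key: value / cnt for key, value in d.items()}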
Example 8
def main(args):
    def log_string(msg):
        logger.info(msg)
        print(msg)

    cfg = get_config(args)
    '''LOG'''
    log_dir = pjoin(cfg['experiment_dir'], 'log')
    ensure_dirs(log_dir)

    logger = logging.getLogger("TrainModel")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler('%s/log.txt' % (log_dir))
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    log_string('PARAMETER ...')
    log_string(cfg)
    '''DATA'''
    train_dataloader = get_dataloader(cfg, 'train', shuffle=True)
    test_dataloader = get_dataloader(cfg, 'test')

    if args.use_val is not None:
        val_dataloader = get_dataloader(cfg, args.use_val)
    else:
        val_dataloader = None
    '''TRAINER'''
    trainer = Trainer(cfg, logger)
    start_epoch = trainer.resume()

    def test_all():
        '''testing'''
        test_loss = {}
        for i, data in enumerate(test_dataloader):
            pred_dict, loss_dict = trainer.test(data)
            loss_dict['cnt'] = 1
            add_dict(test_loss, loss_dict)

        cnt = test_loss.pop('cnt')
        log_loss_summary(test_loss, cnt,
                         lambda x, y: log_string('Test {} is {}'.format(x, y)))

        if val_dataloader is not None:
            val_loss = {}
            for i, data in enumerate(val_dataloader):
                pred_dict, loss_dict = trainer.test(data)
                loss_dict['cnt'] = 1
                add_dict(val_loss, loss_dict)

            cnt = val_loss.pop('cnt')
            log_loss_summary(
                val_loss, cnt, lambda x, y: log_string('{} {} is {}'.format(
                    args.use_val, x, y)))

    for epoch in range(start_epoch, cfg['total_epoch']):
        trainer.step_epoch()
        train_loss = {}
        '''training'''
        for i, data in enumerate(train_dataloader):
            loss_dict = trainer.update(data)
            loss_dict['cnt'] = 1
            add_dict(train_loss, loss_dict)

        cnt = train_loss.pop('cnt')
        log_loss_summary(
            train_loss, cnt,
            lambda x, y: log_string('Train {} is {}'.format(x, y)))

        if (epoch + 1) % cfg['freq']['save'] == 0:
            trainer.save()

        test_all()
Example 9
    args = parse_args()
    cfg = get_config(args, save=False)
    base_path = cfg['obj']['basepath']
    obj_category = cfg['obj_category']

    obj_info = cfg['obj_info']

    data_path = pjoin(cfg['experiment_dir'], 'results', 'data')

    all_raw = os.listdir(data_path)
    all_raw = sorted(all_raw)

    error_dict = {}

    for i, raw in enumerate(all_raw):
        name = raw.split('.')[-2]
        with open(pjoin(data_path, raw), 'rb') as f:
            data = pickle.load(f)
        cur_dict = eval_data(name, data, obj_info)
        error_dict.update(cur_dict)

    err_path = pjoin(cfg['experiment_dir'], 'results', 'err.pkl')
    with open(err_path, 'wb') as f:
        pickle.dump(error_dict, f)
    avg_dict = {}
    for inst in error_dict:
        add_dict(avg_dict, error_dict[inst])
    log_loss_summary(avg_dict, len(error_dict),
                     lambda x, y: print(f'{x}: {y}'))
    per_dict_to_csv(error_dict, err_path.replace('pkl', 'csv'))
Example 10
def main(args):
    def log_string(msg):
        logger.info(msg)
        print(msg)

    cfg = get_config(args, save=False)

    '''LOG'''
    log_dir = pjoin(cfg['experiment_dir'], 'log')
    ensure_dirs(log_dir)

    logger = logging.getLogger("TestModel")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler('%s/log_test.txt' % (log_dir))
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    log_string('PARAMETER ...')
    log_string(cfg)

    '''TRAINER'''
    trainer = Trainer(cfg, logger)
    trainer.resume()

    '''testing'''
    save = cfg['save']
    no_eval = cfg['no_eval']

    dataset_name = args.mode_name

    test_dataloader = get_dataloader(cfg, dataset_name)
    test_loss = {'cnt': 0}

    zero_time = time.time()
    time_dict = {'data_proc': 0.0, 'network': 0.0}
    total_frames = 0

    for i, data in tqdm(enumerate(test_dataloader), total=len(test_dataloader), smoothing=0.9):
        num_frames = len(data)
        total_frames += num_frames
        print(f'Trajectory {i}, {num_frames:8} frames****************************')

        start_time = time.time()
        elapse = start_time - zero_time
        time_dict['data_proc'] += elapse
        print(f'Data Preprocessing: {elapse:8.2f}s {num_frames / elapse:8.2f}FPS')

        pred_dict, loss_dict = trainer.test(data, save=save, no_eval=no_eval)

        elapse = time.time() - start_time
        time_dict['network'] += elapse
        print(f'Network Forwarding: {elapse:8.2f}s {num_frames / elapse:8.2f}FPS')

        loss_dict['cnt'] = 1
        add_dict(test_loss, loss_dict)

        zero_time = time.time()

    print(f'Overall, {total_frames:8} frames****************************')
    print(f'Data Preprocessing: {time_dict["data_proc"]:8.2f}s {total_frames / time_dict["data_proc"]:8.2f}FPS')
    print(f'Network Forwarding: {time_dict["network"]:8.2f}s {total_frames / time_dict["network"]:8.2f}FPS')
    if cfg['batch_size'] > 1:
        print(f'PLEASE SET batch_size = 1 TO TEST THE SPEED. CURRENT batch_size: {cfg["batch_size"]}')

    cnt = test_loss.pop('cnt')
    log_loss_summary(test_loss, cnt, lambda x, y: log_string('Test {} is {}'.format(x, y)))
    if save and not no_eval:
        trainer.model.save_per_diff()
Example 11
def train_and_evaluate(input_train,
                       labels_train,
                       input_val,
                       labels_val,
                       model,
                       optimizer,
                       loss_fn,
                       epochs=1,
                       batch_size=32,
                       clip=None,
                       show_progress=True,
                       writer=None,
                       model_save_dir=None,
                       starting_epoch=1,
                       initial_best_val_acc=-0.01):
    """Trains and tests the model (on the validation set). Additionally saves the model with best val accuracy.

    Args:
        input_train: list of tuples containing model input for training
        labels_train: list of tuples containing labels corresponding to model input for training
        input_val: list of tuples containing model input for validation
        labels_val: list of tuples containing labels corresponding to model input for validation
        model: (torch.nn.Module) the neural network
        optimizer: (torch.optim) optimizer for parameters of model
        loss_fn: a function that takes batch_output and batch_labels and computes the loss for the batch
        epochs: number of epochs to run
        batch_size: maximum batch_size
        clip: the value to which clip the norm of gradients to
        writer: tensorboardX.SummaryWriter
        model_save_dir: directory where to save the model
        starting_epoch: epoch number to start with.
        initial_best_val_acc: initial best accuracy on validation set, useful if resuming training.
    """

    best_val_acc = initial_best_val_acc
    if show_progress:
        t = trange(starting_epoch, starting_epoch + epochs)
    else:
        t = range(starting_epoch, starting_epoch + epochs)

    for epoch_num in t:
        # shuffle training data
        input_train, labels_train = shuffle_together(input_train, labels_train)

        epoch_metrics = {}  # stores summed metrics for this epoch
        for batch_input, batch_labels in zip(grouper(input_train, batch_size),
                                             grouper(labels_train,
                                                     batch_size)):
            batch_input = list(filter(
                lambda x: x is not None,
                batch_input))  # remove None objects introduced by grouper
            batch_labels = list(filter(
                lambda x: x is not None,
                batch_labels))  # remove None objects introduced by grouper

            batch_metrics = train(batch_input, batch_labels, model, optimizer,
                                  loss_fn, clip)
            add_dict(epoch_metrics, batch_metrics)

        val_metrics = test(input_val, labels_val, model, loss_fn, batch_size)

        epoch_metrics = divide_dict(epoch_metrics, len(input_train))
        val_metrics = divide_dict(val_metrics, len(input_val))

        # Saves best model till now.
        if model_save_dir:
            if val_metrics['acc'] > best_val_acc:
                best_val_acc = val_metrics['acc']
                save_model(epoch_num, {
                    'epoch_num': epoch_num,
                    'val_acc': val_metrics['acc'],
                    'model_state': model.state_dict(),
                    'optimizer_state': optimizer.state_dict()
                },
                           model_save_dir,
                           filename='best.model')

        metrics = {
            **add_string_to_key(epoch_metrics, 'train'),
            **add_string_to_key(val_metrics, 'val')
        }

        # Write to tensorboard
        if writer is not None:
            for key, value in metrics.items():
                writer.add_scalar(key, value, epoch_num)

        if show_progress:
            t.set_postfix(metrics)
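
`shuffle_together` and `add_string_to_key` are small utilities this loop assumes; plausible sketches:

import random

def shuffle_together(*lists):
    # Shuffle several parallel lists with the same permutation.
    combined = list(zip(*lists))
    random.shuffle(combined)
    return [list(items) for items in zip(*combined)]

def add_string_to_key(d, prefix):
    # Namespace metric names, e.g. 'acc' -> 'train_acc'.
    return {f'{prefix}_{key}': value for key, value in d.items()}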
Example 12
def main(args):
    def log_string(msg):
        logger.info(msg)
        print(msg)

    cfg = get_config(args)

    '''LOG'''
    log_dir = pjoin(cfg['experiment_dir'], 'log')
    ensure_dirs(log_dir)

    logger = logging.getLogger("TrainModel")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler('%s/log_finetune.txt' % (log_dir))
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    log_string('PARAMETER ...')
    log_string(cfg)

    '''DATA'''
    test_dataloader = get_dataloader(cfg, args.use_val, downsampling=args.downsample)

    train_real_dataloader = get_dataloader(cfg, 'real_train', shuffle=True)
    syn_train_len = len(train_real_dataloader) * args.syn_n

    train_syn_dataloader = get_dataloader(cfg, 'train', shuffle=True)
    syn_train_cycle = iter(train_syn_dataloader)
    num_div = len(train_syn_dataloader) // syn_train_len

    '''TRAINER'''
    trainer = Trainer(cfg, logger)
    start_epoch = trainer.resume()

    def test_all():
        '''testing'''
        test_loss = {}
        for i, data in enumerate(test_dataloader):
            pred_dict, loss_dict = trainer.test(data)
            loss_dict['cnt'] = 1
            add_dict(test_loss, loss_dict)

        cnt = test_loss.pop('cnt')
        log_loss_summary(test_loss, cnt, lambda x, y: log_string('real_test {} is {}'.format(x, y)))

    test_all()

    for epoch in range(start_epoch, cfg['total_epoch']):
        trainer.step_epoch()

        '''training'''
        if not args.real_only:
            train_loss = {}
            for i in range(syn_train_len):
                data = next(syn_train_cycle)
                loss_dict = trainer.update(data)
                loss_dict['cnt'] = 1
                add_dict(train_loss, loss_dict)

            cnt = train_loss.pop('cnt')
            log_loss_summary(train_loss, cnt, lambda x, y: log_string('Syn_Train {} is {}'.format(x, y)))

        train_loss = {}
        for i, data in enumerate(train_real_dataloader):
            loss_dict = trainer.update(data)
            loss_dict['cnt'] = 1
            add_dict(train_loss, loss_dict)

        cnt = train_loss.pop('cnt')
        log_loss_summary(train_loss, cnt, lambda x, y: log_string('Real_Train {} is {}'.format(x, y)))

        if (epoch + 1) % cfg['freq']['save'] == 0:
            trainer.save()

        test_all()
        if (epoch + 1) % num_div == 0:
            syn_train_cycle = iter(train_syn_dataloader)
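
Re-seeding `syn_train_cycle` only every `num_div` epochs can raise StopIteration when the loader lengths don't divide evenly. A common alternative, assuming per-pass reshuffling by the DataLoader is all that's needed, is an endless generator that replaces both the `iter(...)` call and the manual reset:

def loop(dataloader):
    # Yield batches forever, restarting (and reshuffling) at each pass.
    while True:
        for batch in dataloader:
            yield batch

syn_train_cycle = loop(train_syn_dataloader)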