Example no. 1
def eval():
    with experiment.test():
        net.eval()
        lsm = nn.LogSoftmax(dim=1)
        Y_pred = {}
        Y_true = {}
        for batch in testloader:
            for sample in batch:
                vid = sample['vid']
                task = sample['task']
                X = sample['X'].cuda() if args.use_gpu else sample['X']
                O = lsm(net(X, task).squeeze(0))
                y = np.zeros(O.size(), dtype=np.float32)
                dp(y, -O.detach().cpu().numpy())
                if task not in Y_pred:
                    Y_pred[task] = {}
                Y_pred[task][vid] = y
                annot_path = os.path.join(args.annotation_path, task + '_' + vid + '.csv')
                if os.path.exists(annot_path):
                    if task not in Y_true:
                        Y_true[task] = {}
                    Y_true[task][vid] = read_assignment(*y.shape, annot_path)
        recalls = get_recalls(Y_true, Y_pred)
        for task,rec in recalls.items():
            print('Task {0}. Recall = {1:0.3f}'.format(task, rec))
        avg_recall = np.mean(list(recalls.values()))
        experiment.log_metric('recall', avg_recall)
        print('Recall: {0:0.3f}'.format(avg_recall))
        net.train()
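The ground-truth loading above (and again in Example no. 4) reads a per-video CSV into a frame-by-step matrix via read_assignment. A minimal sketch of such a reader, assuming each row holds `step,start,end` values; the row format and the helper name are assumptions for illustration, not the repository's exact implementation:

import numpy as np

def read_assignment_sketch(T, K, path):
    # Returns a T x K {0,1} matrix: Y[t, k] == 1 iff frame t is annotated with step k.
    Y = np.zeros((T, K), dtype=np.uint8)
    with open(path, 'r') as f:
        for line in f:
            step, start, end = line.strip().split(',')
            # steps assumed 1-indexed in the CSV; frame ranges treated as half-open
            Y[int(float(start)):int(float(end)), int(step) - 1] = 1
    return Y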
Example no. 2
def train_epoch(pretrain=False):
    cumloss = 0.
    for batch in trainloader:
        for sample in batch:
            vid = sample['vid']
            task = sample['task']
            X = sample['X'].cuda() if args.use_gpu else sample['X']
            C = sample['C'].cuda() if args.use_gpu else sample['C']
            if pretrain:
                # pick a random assignment that satisfies the constraints
                O = np.random.rand(X.size()[0],
                                   n_steps[task]) + C.cpu().numpy()
                y = np.zeros(Y[task][vid].shape, dtype=np.float32)
                dp(y, O.astype(np.float32), exactly_one=True)
                y_new = th.tensor(y, dtype=th.float)
                Y[task][vid].data = y_new.cuda() if args.use_gpu else y_new
            else:
                # updating assignment
                O = net(X, task)
                # y = th.tensor(Y[task][vid].data,requires_grad=True)
                y = Y[task][vid].requires_grad_(True)
                loss = loss_fn(O, y, C)
                param_grads = th.autograd.grad(loss,
                                               net.parameters(),
                                               create_graph=True,
                                               only_inputs=True)
                F = loss
                for g in param_grads:
                    F -= 0.5 * args.lr * (g**2).sum()
                Y_grad = th.autograd.grad(F, [y], only_inputs=True)
                y = np.zeros(Y[task][vid].size(), dtype=np.float32)
                dp(y, Y_grad[0].cpu().numpy())
                Y[task][vid].requires_grad_(False)
                y_new = th.tensor(y, dtype=th.float)
                Y[task][vid].data = y_new.cuda() if args.use_gpu else y_new

            # updating model parameters
            O = net(X, task)
            loss = loss_fn(O, Y[task][vid], C)
            loss.backward()
            cumloss += loss.item()
            optimizer.step()
            net.zero_grad()
    return cumloss
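train_epoch alternates two updates per sample: it recomputes the hard assignment Y[task][vid] by dynamic programming (randomly during pretraining, otherwise from the lookahead objective F), then takes a gradient step on the network with that assignment held fixed. A minimal sketch of how the global Y dictionary could be initialized, inferred from how the snippet uses it (one frames x n_steps[task] float tensor per video); the trainset loop and zero initialization are assumptions for illustration:

Y = {}
for sample in trainset:  # hypothetical iterable over the same samples trainloader yields
    task, vid = sample['task'], sample['vid']
    T = sample['X'].size(0)  # number of video frames
    y0 = th.zeros((T, n_steps[task]), dtype=th.float)
    Y.setdefault(task, {})[vid] = y0.cuda() if args.use_gpu else y0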
Example no. 3
def disparity (left, right, nd=20, LAMBDA=0.025, P1=20, P2f=30, P3=4, T=30):
  """
  Computes the disparity map of two rectified stereo images `left` and `right`.

  `nd` is the maximum value of the disparity labels (-nd <= d < nd)
  `LAMBDA` is the horizontal/vertical DP balancing term
  `P1`, `P2f`, `P3`, `T` are the smoothness model parameters
  """

  if left.ndim == 3:
    left = left.mean (axis=2)
  if right.ndim == 3:
    right = right.mean (axis=2)
  if left.max() <= 1.1:
    left = left * 255.
  if right.max() <= 1.1:
    right = right * 255.

  ######################
  # Vertical tree pass #
  ######################

  # Horizontal pass
  m = dp.data_energy (left, right, nd=nd)

  F = dp.dp (left, right, energy=m, axis=0)
  B = dp.dp (left, right, energy=m, axis=0, backward=True)
  C = F + B - m

  # Vertical pass
  Fc = dp.dp (left, right, energy=C, axis=1)
  Bc = dp.dp (left, right, energy=C, axis=1, backward=True)
  V = Fc + Bc - C

  ###################################
  # Compute subsequent coefficients #
  ###################################

  Vc = m + LAMBDA*(V - V.min(axis=2).reshape((V.shape[0], V.shape[1], 1)))

  ########################
  # Horizontal tree pass #
  ########################

  # Horizontal pass
  F = dp.dp (left, right, energy=Vc, axis=1)
  B = dp.dp (left, right, energy=Vc, axis=1, backward=True)
  C = F + B - Vc

  # Vertical pass
  Fc = dp.dp (left, right, energy=C, axis=0)
  Bc = dp.dp (left, right, energy=C, axis=0, backward=True)
  H = Fc + Bc - C

  return H.argmin(axis=2)
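A usage sketch for the function above, assuming two rectified images on disk and that imageio is installed (any loader returning NumPy arrays works). Per the docstring the labels cover -nd <= d < nd, so shifting by nd is one plausible mapping back to signed disparities:

import imageio.v2 as imageio

left = imageio.imread('left.png')    # assumed file names
right = imageio.imread('right.png')
labels = disparity(left, right, nd=20)
signed_disp = labels - 20            # assumes label index 0 corresponds to d = -nd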
Example no. 4
    def __call__(self, sample, model, Y_pred, Y_true):
        # please install dp from `https://github.com/DmZhukov/CrossTask`
        from dp import dp
        vid, task = sample['video_id'][0], sample['task'][0]
        sample = self.to_ctx(sample)
        # compute the average logits over sliding windows.
        output = model(**sample)
        batch_logits = output["logits"].cpu()

        video_len = sample["video_len"][0]

        # the following version is slow.
        logits = torch.zeros((video_len, batch_logits.size(1)))
        logits_counts = torch.zeros((video_len, 1), dtype=torch.long)
        # use the same loop as aligner to recover.
        batch_logit_idx = 0
        for window_start in range(0, video_len, self.sliding_window):
            video_end = min(video_len - window_start, self.sliding_window_size)
            logits[window_start:window_start +
                   video_end] += batch_logits[batch_logit_idx:batch_logit_idx +
                                              video_end]
            batch_logit_idx += video_end
            logits_counts[window_start:window_start + video_end] += torch.ones(
                (video_end, 1), dtype=torch.long)

            if (video_len - window_start) <= self.sliding_window_size:
                break

        logits /= logits_counts
        assert logits.size() == (video_len,
                                 batch_logits.size(1)), "{}, {}".format(
                                     logits.size(), video_len)

        O = self.lsm(logits)
        y = np.zeros(O.size(), dtype=np.float32)
        dp(y, -O.detach().cpu().numpy())
        if task not in Y_pred:
            Y_pred[task] = {}
        Y_pred[task][vid] = y
        annot_path = os.path.join(self.annotation_path,
                                  task + '_' + vid + '.csv')
        if os.path.exists(annot_path):
            if task not in Y_true:
                Y_true[task] = {}
            Y_true[task][vid] = self._read_assignment(*y.shape, annot_path)
Example no. 5
def run_experiment(run_params, out_filename):

    data = pd.DataFrame()

    total_cases = len(
        run_params['search_components']) * run_params['iterations'] * len(
            run_params['dimacs'])
    counter = 0
    #lengths = []

    print("Starting experiment.")
    print("Total {} cases will be run.".format(total_cases))

    for scmp_set in run_params['search_components']:
        scmp_set_name = scmp_set[0]
        scmp_cmp = [tup[0] for tup in scmp_set[1]]
        scmp_cmp_names = [tup[1] for tup in scmp_set[1]]

        for dimacs_set in run_params['dimacs']:
            dimacs_set_name = dimacs_set[0]
            dimacs_set_paths = dimacs_set[1]

            for i in range(run_params['iterations']):
                counter += 1
                print(
                    "Running: scmp_set_name: {}, dimacs_set_name: {}, iteration: {}. Case {} of {} total."
                    .format(scmp_set_name, dimacs_set_name, i, counter,
                            total_cases),
                    end='\r')

                results = dp(dimacs_set_paths, scmp_cmp)

                # TODO: actually validate the result here ('valid' is currently hard-coded to 1)

                res_dict = {
                    'SAT': [1 if results['SAT'] else 0],
                    'valid': [1],
                    'dimacs_set': [dimacs_set_name],
                    'iteration': [i],
                    'cmp_set': [scmp_set_name],
                    'backtracks': [results['backtracks']]
                }

                for k, name in enumerate(scmp_cmp_names):
                    res_dict[name] = [
                        results['cnf'].search_cmp[k].get_metrics()
                    ]

                #lengths.append((results['cnf'].length_of_clause, dimacs_set_name+scmp_set_name))

                df = pd.DataFrame(res_dict)
                # DataFrame.append was removed in pandas 2.0; use concat instead
                data = pd.concat([data, df], ignore_index=True)

    data.to_csv(out_filename)
    print('')
    print('Done with {} cases. Results written to {}'.format(
        total_cases, out_filename))
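A usage sketch for the driver above. The shape of run_params is inferred from how run_experiment indexes it: named sets of search components (each a list of (component, metric-name) pairs), named groups of DIMACS file paths, and an iteration count. The component classes come from Example no. 6 below; the set names and file paths are placeholders:

import search_components

run_params = {
    'search_components': [
        ('unit+pure+jw2', [
            (search_components.UnitClauseComponent(), 'unit_metrics'),
            (search_components.PureLiteralComponent(), 'pure_metrics'),
            (search_components.JWTwoSided(), 'jw2_metrics'),
        ]),
    ],
    'dimacs': [
        ('example_set', ['example1.cnf', 'example2.cnf']),  # placeholder paths
    ],
    'iterations': 3,
}
run_experiment(run_params, 'results.csv')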
Example no. 6
        search_components.PureLiteralComponent(),
        search_components.JWTwoSided()
    ]
    print('Starting DP with a two-sided JW heuristic...')
else:
    srcmp = [
        search_components.UnitClauseComponent(),
        search_components.PureLiteralComponent(),
        search_components.MostConstrainingComponent()
    ]
    print('Starting DP with a most constraining heuristic...')

print('Input DIMACS file: ', args.input_file)

start = time.time()
answer = dp([args.input_file], srcmp)
end = time.time()

splits = srcmp[2].get_metrics()
backtracks = answer['backtracks']

print('Done. {:.3f}s elapsed. {} backtracks, {} splits.'.format(
    end - start, backtracks, splits))

output_file = args.input_file + '.out'

if answer['SAT']:
    print('Result is: SAT. Writing output to', output_file)

    h_output_file = open(output_file, 'w')
    for var in answer['solution']: