Example #1
0
def test():
    """Evaluate a trained Vinet checkpoint on the V2_01_easy sequence.

    Loads the checkpoint, runs the model frame-by-frame while integrating
    each predicted relative motion into an absolute trajectory, prints the
    mean squared error over the processed frames, and writes the predicted
    xyz+quaternion poses to a CSV file.

    Raises:
        FileNotFoundError: if the checkpoint file does not exist.
    """
    checkpoint_pytorch = '/notebooks/vinet/vinet_v1_01.pt'
    if os.path.isfile(checkpoint_pytorch):
        # Map all saved tensors onto GPU 0, regardless of the device they
        # were serialized from.
        checkpoint = torch.load(checkpoint_pytorch,
                                map_location=lambda storage, loc: storage.cuda(0))
        #best_err = checkpoint['best_EPE']
    else:
        # Fail fast: the original code only printed a warning here and then
        # crashed below with a NameError on the undefined `checkpoint`.
        raise FileNotFoundError('No checkpoint: ' + checkpoint_pytorch)

    model = Vinet()
    model.load_state_dict(checkpoint)
    model.cuda()
    model.eval()
    mydataset = MyDataset('/notebooks/EuRoC_modify/', 'V2_01_easy')

    err = 0
    ans = []
    abs_traj = None
    start = 5
    # NOTE(review): hard-coded range; the commented-out loop suggests the
    # full dataset (len(mydataset)-1) was intended -- confirm.
    end = 100
    #for i in range(len(mydataset)-1):
    for i in range(start, end):
        data, data_imu, target, target2 = mydataset.load_img_bat(i, 1)
        data, data_imu, target, target2 = data.cuda(), data_imu.cuda(
        ), target.cuda(), target2.cuda()

        if i == start:
            ## load first SE3 pose xyzQuaternion
            # Seed the absolute trajectory with the ground-truth start pose,
            # reshaped to (1, 1, D) to match the model's expected input.
            abs_traj = mydataset.getTrajectoryAbs(start)
            abs_traj = np.expand_dims(abs_traj, axis=0)
            abs_traj = np.expand_dims(abs_traj, axis=0)
            abs_traj = Variable(
                torch.from_numpy(abs_traj).type(torch.FloatTensor).cuda())

        # Predict the relative (frame-to-frame) motion.
        output = model(data, data_imu, abs_traj)

        err += float(((target - output)**2).mean())

        output = output.data.cpu().numpy()

        # Convert the relative se(3) R6 vector to xyz + quaternion for logging.
        xyzq = se3qua.se3R6toxyzQ(output)

        # Integrate the prediction into the absolute trajectory (on CPU),
        # then re-pack it as a (1, 1, D) cuda tensor for the next iteration.
        abs_traj = abs_traj.data.cpu().numpy()[0]
        numarr = output

        abs_traj = se3qua.accu(abs_traj, numarr)
        abs_traj = np.expand_dims(abs_traj, axis=0)
        abs_traj = np.expand_dims(abs_traj, axis=0)
        abs_traj = Variable(
            torch.from_numpy(abs_traj).type(torch.FloatTensor).cuda())

        ans.append(xyzq)
        print(xyzq)
        print('{}/{}'.format(str(i + 1), str(len(mydataset) - 1)))

    # Average over the number of frames actually processed. The original
    # divided by len(mydataset)-1 even though only end-start frames ran,
    # which deflated the reported error.
    print('err = {}'.format(err / (end - start)))
    trajectoryAbs = mydataset.getTrajectoryAbsAll()
    print(trajectoryAbs[0])
    x = trajectoryAbs[0].astype(str)
    x = ",".join(x)

    with open(
            '/notebooks/EuRoC_modify/V2_01_easy/vicon0/sampled_relative_ans.csv',
            'w+') as f:
        # First row: the ground-truth absolute start pose.
        f.write(x + '\n')

        # Remaining rows: one predicted xyz+quaternion pose per frame.
        for i in range(len(ans) - 1):
            tmpStr = ans[i].astype(str)
            tmpStr = ",".join(tmpStr)
            print(tmpStr)
            print(type(tmpStr))
            f.write(tmpStr + '\n')
Example #2
0
    def train(self):
        """Run one training pass over the training set.

        For each frame, predicts the relative motion, integrates it into a
        running absolute pose, and accumulates loss terms; a backward pass
        and optimizer step happen once per sequence (at `endOfSeq`).

        Returns:
            (r6Losses, poseLosses, totalLosses): per-frame numpy loss values
            for the whole pass, or None if `maxEpochs` already elapsed.
        """

        # Switch model to train mode
        self.model.train()

        # Check if maxEpochs have elapsed
        if self.curEpoch >= self.maxEpochs:
            print('Max epochs elapsed! Returning ...')
            return

        # Increment iters
        self.iters += 1

        # Variables to store stats
        # The *Losses lists collect every frame of the pass; the *Loss_seq
        # lists are reset at each sequence boundary and used for logging.
        r6Losses = []
        poseLosses = []
        totalLosses = []
        r6Loss_seq = []
        poseLoss_seq = []
        totalLoss_seq = []

        # Handle debug mode here
        if self.args.debug is True:
            numTrainIters = self.args.debugIters
        else:
            numTrainIters = len(self.train_set)

        elapsedBatches = 0
        traj_pred = None
        gen = trange(numTrainIters)
        print("gen", gen)
        # assert False
        # Run a pass of the dataset
        for i in gen:
            if self.args.profileGPUUsage is True:
                gpu_memory_map = get_gpu_memory_map()
                tqdm.write('GPU usage: ' + str(gpu_memory_map[0]),
                           file=sys.stdout)

            # Get the next frame
            # r6 is the ground-truth relative motion and xyzq the ground-truth
            # absolute pose (xyz + quaternion) -- presumed from the names;
            # confirm against the dataset class.
            inp, imu, r6, xyzq, _, _, _, timestamp, endOfSeq = self.train_set[
                i]
            pred_r6 = self.model.forward(inp, imu, xyzq)
            # del inp
            # del imu
            if self.abs_traj is None:
                # TODO: verify that the initial pose is loaded correctly here
                # and that the integration/accumulation below is computed
                # correctly. (translated from Korean)
                self.abs_traj = xyzq.data.cpu()[0][0]
                # Feed it through the model
            numarr = pred_r6.data.cpu().numpy()[0][0]
            # print('start :',self.abs_traj)
            # print('numarr :', numarr)

            # Integrate the predicted relative motion into the running
            # absolute pose (computed outside the autograd graph).
            self.abs_traj = se3qua.accu(self.abs_traj, numarr)
            # print('abs_traj :', self.abs_traj)

            # Re-pack the absolute pose as a (1, 1, ...) float cuda tensor
            # so it can be compared against xyzq below.
            abs_traj_input = np.expand_dims(self.abs_traj, axis=0)
            abs_traj_input = np.expand_dims(abs_traj_input, axis=0)
            abs_traj_input = Variable(
                torch.from_numpy(abs_traj_input).type(
                    torch.FloatTensor)).cuda()
            # print(abs_traj_input)
            # raise Exception()

            # Detached per-frame loss terms, kept only for statistics
            # (requires_grad=False); they do not feed the backward pass.
            curloss_r6 = Variable(self.args.scf * (torch.dist(pred_r6, r6)**2),
                                  requires_grad=False)
            curloss_xyzq = Variable(torch.dist(abs_traj_input, xyzq)**2,
                                    requires_grad=False)

            curloss_xyzq_trans = Variable(
                self.args.scf * 10 *
                (torch.dist(abs_traj_input[:, :, :3], xyzq[:, :, :3])**2),
                requires_grad=False)
            curloss_xyzq_rot = Variable(torch.dist(abs_traj_input[:, :, 3:],
                                                   xyzq[:, :, 3:])**2,
                                        requires_grad=False)
            self.loss_r6 = curloss_r6
            self.loss_xyzq = curloss_xyzq

            # if np.random.normal() < -0.9:
            #     tqdm.write('r6(pred,gt): ' + str(pred_r6.data)+' '+ str(r6.data) ,file=sys.stdout)
            #     tqdm.write('pose(pred,gt): ' + str(abs_traj_input.data) + ' '+str(xyzq.data), file=sys.stdout)

            # Accumulate the training loss for this sequence.
            # NOTE(review): .item() detaches each term from the autograd
            # graph, so these additions contribute no gradients to
            # self.loss.backward() below -- confirm self.loss also
            # accumulates a differentiable term elsewhere.
            self.loss += sum([
                self.args.scf * (self.loss_fn(pred_r6, r6)).item(),
                self.args.scf * 10 *
                self.loss_fn(abs_traj_input[:, :, :3], xyzq[:, :, :3]).item(),
                self.loss_fn(abs_traj_input[:, :, 3:], xyzq[:, :, 3:]).item()
            ])

            # Move the stat terms to CPU numpy and record them.
            # NOTE(review): curloss_xyzq is converted but never appended to
            # any list -- only the trans/rot split is recorded.
            curloss_r6 = curloss_r6.detach().cpu().numpy()
            curloss_xyzq = curloss_xyzq.detach().cpu().numpy()
            curloss_xyzq_rot = curloss_xyzq_rot.detach().cpu().numpy()
            curloss_xyzq_trans = curloss_xyzq_trans.detach().cpu().numpy()
            r6Losses.append(curloss_r6)
            r6Loss_seq.append(curloss_r6)
            poseLosses.append(curloss_xyzq_rot + curloss_xyzq_trans)
            poseLoss_seq.append(curloss_xyzq_rot + curloss_xyzq_trans)
            totalLosses.append(curloss_r6 + curloss_xyzq_rot +
                               curloss_xyzq_trans)
            totalLoss_seq.append(curloss_r6 + curloss_xyzq_rot +
                                 curloss_xyzq_trans)
            del curloss_r6
            del curloss_xyzq

            # Handle debug mode here. Force execute the below if statement in the
            # last debug iteration
            if self.args.debug is True:
                if i == numTrainIters - 1:
                    endOfSeq = True

            elapsedBatches += 1

            # if endOfSeq is True:
            if endOfSeq is True:
                elapsedBatches = 0

                # if self.args.gamma > 0.0:
                #     paramsDict = self.model.state_dict()
                #     # print(paramsDict.keys())
                #
                #     if self.args.numLSTMCells == 1:
                #         reg_loss = None
                #         reg_loss = paramsDict['lstm1.weight_ih'].norm(2)
                #         reg_loss += paramsDict['lstm1.weight_hh'].norm(2)
                #         reg_loss += paramsDict['lstm1.bias_ih'].norm(2)
                #         reg_loss += paramsDict['lstm1.bias_hh'].norm(2)
                #     else:
                #         reg_loss = None
                #         # reg_loss = paramsDict['rnnIMU.weight_ih_l0'].norm(2)
                #         # reg_loss += paramsDict['rnnIMU.weight_hh_l0'].norm(2)
                #         # reg_loss += paramsDict['rnnIMU.bias_ih_l0'].norm(2)
                #         # reg_loss += paramsDict['rnnIMU.bias_hh_l0'].norm(2)
                #         # reg_loss += paramsDict['rnnIMU.weight_ih_l1'].norm(2)
                #         # reg_loss += paramsDict['rnnIMU.weight_Hh_l1'].norm(2)
                #         # reg_loss += paramsDict['rnnIMU.bias_ih_l1'].norm(2)
                #         # reg_loss += paramsDict['rnnIMU.bias_Hh_l1'].norm(2)
                #         reg_loss = paramsDict['rnn.weight_ih_l0'].norm(2)
                #         reg_loss += paramsDict['rnn.weight_hh_l0'].norm(2)
                #         reg_loss += paramsDict['rnn.bias_ih_l0'].norm(2)
                #         reg_loss += paramsDict['rnn.bias_hh_l0'].norm(2)
                #         reg_loss += paramsDict['rnn.weight_ih_l1'].norm(2)
                #         reg_loss += paramsDict['rnn.weight_Hh_l1'].norm(2)
                #         reg_loss += paramsDict['rnn.bias_ih_l1'].norm(2)
                #         reg_loss += paramsDict['rnn.bias_Hh_l1'].norm(2)
                #     self.loss = sum([self.args.gamma * reg_loss, self.loss])

                # Log the per-sequence averages, then reset the per-sequence
                # accumulators for the next sequence.
                tqdm.write('r6 Loss: ' + str(np.mean(r6Loss_seq)) +
                           'pose Loss' + str(np.mean(poseLoss_seq)),
                           file=sys.stdout)
                r6Loss_seq = []
                poseLoss_seq = []
                totalLoss_seq = []

                # Compute gradients
                self.loss.backward()

                # Total L2 norm of all parameter gradients, for monitoring.
                paramList = list(
                    filter(lambda p: p.grad is not None,
                           [param for param in self.model.parameters()]))
                totalNorm = sum([(p.grad.data.norm(2.)**2.)
                                 for p in paramList])**(1. / 2)
                tqdm.write('gradNorm: ' + str(totalNorm.item()))

                # Perform gradient clipping, if enabled
                if self.args.gradClip is not None:
                    torch.nn.utils.clip_grad_norm_(self.model.parameters(),
                                                   self.args.gradClip)

                # Update parameters
                self.optimizer.step()

                # If it's the end of sequence, reset hidden states
                # if endOfSeq is True:
                #     self.model.reset_LSTM_hidden()
                # self.model.detach_LSTM_hidden()  # ???

                # Reset loss variables
                self.loss_r6 = torch.zeros(1, dtype=torch.float32).cuda()
                self.loss_xyzq = torch.zeros(1, dtype=torch.float32).cuda()
                self.loss = torch.zeros(1, dtype=torch.float32).cuda()

                # Flush gradient buffers for next forward pass
                self.model.zero_grad()
                # Reset the integrated pose so the next sequence re-seeds it.
                self.abs_traj = None

        return r6Losses, poseLosses, totalLosses
Example #3
0
def train():
    """Train Vinet on the V1_01_easy EuRoC sequence.

    Runs `epoch` passes over the dataset, combining a frame-to-frame loss
    with a global (accumulated) pose loss, logs progress via TensorBoard
    and `tools.TimerBlock`, and checkpoints the model after every epoch.
    """
    epoch = 10
    batch = 1
    model = Vinet()
    optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
    #optimizer = optim.Adam(model.parameters(), lr = 0.001)

    writer = SummaryWriter()

    model.train()

    mydataset = MyDataset('/notebooks/EuRoC_modify/', 'V1_01_easy')
    #criterion  = nn.MSELoss()
    criterion = nn.L1Loss(size_average=False)

    start = 5
    end = len(mydataset) - batch
    batch_num = (end - start)  #/ batch
    startT = time.time()
    abs_traj = None

    with tools.TimerBlock("Start training") as block:
        for k in range(epoch):
            for i in range(start, end):
                data, data_imu, target_f2f, target_global = mydataset.load_img_bat(
                    i, batch)
                data, data_imu, target_f2f, target_global = \
                    data.cuda(), data_imu.cuda(), target_f2f.cuda(), target_global.cuda()

                optimizer.zero_grad()

                if i == start:
                    ## load first SE3 pose xyzQuaternion
                    # Seed the absolute trajectory with the ground-truth
                    # start pose, reshaped to (1, 1, D) for the model input.
                    abs_traj = mydataset.getTrajectoryAbs(start)

                    abs_traj_input = np.expand_dims(abs_traj, axis=0)
                    abs_traj_input = np.expand_dims(abs_traj_input, axis=0)
                    abs_traj_input = Variable(
                        torch.from_numpy(abs_traj_input).type(
                            torch.FloatTensor).cuda())

                ## Forward
                output = model(data, data_imu, abs_traj_input)

                ## Accumulate pose: integrate the predicted relative motion
                ## into the absolute pose (on CPU, outside the graph).
                numarr = output.data.cpu().numpy()

                abs_traj = se3qua.accu(abs_traj, numarr)

                abs_traj_input = np.expand_dims(abs_traj, axis=0)
                abs_traj_input = np.expand_dims(abs_traj_input, axis=0)
                abs_traj_input = Variable(
                    torch.from_numpy(abs_traj_input).type(
                        torch.FloatTensor).cuda())

                ## (F2F loss) + (Global pose loss)
                ## Global pose: Full concatenated pose relative to the start of the sequence
                loss = criterion(output, target_f2f) + criterion(
                    abs_traj_input, target_global)

                loss.backward()
                optimizer.step()

                # Progress bookkeeping. Count iterations completed relative
                # to `start`; the original used `i` directly, over-counting
                # by `start` so the percentage could exceed 100% and the
                # remaining-time estimate went negative at the end.
                done = (i - start) + batch_num * k
                avgTime = block.avg()
                remainingTime = int((batch_num * epoch - done) * avgTime)
                rTime_str = "{:02d}:{:02d}:{:02d}".format(
                    int(remainingTime / 60 // 60),
                    int(remainingTime // 60 % 60), int(remainingTime % 60))

                # loss.item(): indexing a 0-dim tensor (loss.data[0]) raises
                # on PyTorch >= 0.4; this also matches the trainer methods
                # elsewhere in this file that already use .item().
                block.log(
                    'Train Epoch: {}\t[{}/{} ({:.0f}%)]\tLoss: {:.6f}, TimeAvg: {:.4f}, Remaining: {}'
                    .format(k, i, batch_num,
                            100. * done / (batch_num * epoch),
                            loss.item(), avgTime, rTime_str))

                writer.add_scalar('loss', loss.item(), k * batch_num + i)

            # Checkpoint after every epoch.
            check_str = 'checkpoint_{}.pt'.format(k)
            torch.save(model.state_dict(), check_str)

    # Save final weights and export the logged scalars.
    torch.save(model.state_dict(), 'vinet_v1_01.pt')
    writer.export_scalars_to_json("./all_scalars.json")
    writer.close()
Example #4
0
    def validate(self):
        """Run one validation pass over the validation set.

        Integrates predicted relative motions into absolute trajectories,
        writes each completed sequence trajectory to a text file, and logs
        the per-sequence mean total loss.

        Returns:
            (r6Losses, poseLosses, totalLosses): per-frame numpy loss values.
        """

        # Switch model to eval mode
        self.model.eval()

        # Run a pass of the dataset
        traj_pred = None
        self.abs_traj = None

        # Variables to store stats
        # The *Losses lists collect every frame; the *Loss_seq lists are
        # reset at each sequence boundary and used for logging.
        r6Losses = []
        poseLosses = []
        totalLosses = []
        r6Loss_seq = []
        poseLoss_seq = []
        totalLoss_seq = []

        # Handle debug switch here
        if self.args.debug is True:
            numValIters = self.args.debugIters
        else:
            numValIters = len(self.val_set)

        # Choose a generator (for iterating over the dataset, based on whether or not the
        # sbatch flag is set to True). If sbatch is True, we're probably running on a cluster
        # and do not want an interactive output. So, could suppress tqdm and print statements
        if self.args.sbatch is True:
            gen = range(numValIters)
        else:
            gen = trange(numValIters)

        for i in gen:

            if self.args.profileGPUUsage is True:
                gpu_memory_map = get_gpu_memory_map()
                tqdm.write('GPU usage: ' + str(gpu_memory_map[0]),
                           file=sys.stdout)

            # Get the next frame
            inp, imu, r6, xyzq, seq, frame1, frame2, timestamp, endOfSeq = self.val_set[
                i]

            metadata = np.asarray([timestamp])

            # Feed it through the model.
            # (.detach().cpu() replaces the original redundant
            # .data.cpu().detach() chain -- same result.)
            pred_r6 = self.model.forward(inp, imu, xyzq)
            numarr = pred_r6.detach().cpu().numpy()[0][0]

            # Seed the absolute trajectory with the ground-truth pose of the
            # first frame of the sequence.
            if self.abs_traj is None:
                self.abs_traj = xyzq.data.cpu().detach()[0][0]
            if traj_pred is None:
                # First row of the trajectory file: timestamp + initial pose.
                traj_pred = np.concatenate((metadata, self.abs_traj.numpy()),
                                           axis=0)
                traj_pred = np.resize(traj_pred, (1, -1))

            # Integrate the predicted relative motion into the absolute pose.
            self.abs_traj = se3qua.accu(self.abs_traj, numarr)

            cur_pred = np.concatenate((metadata, self.abs_traj), axis=0)
            traj_pred = np.append(traj_pred,
                                  np.resize(cur_pred, (1, -1)),
                                  axis=0)

            # Re-pack the absolute pose as a (1, 1, ...) float cuda tensor
            # so it can be compared against xyzq below.
            abs_traj_input = np.expand_dims(self.abs_traj, axis=0)
            abs_traj_input = np.expand_dims(abs_traj_input, axis=0)
            abs_traj_input = Variable(
                torch.from_numpy(abs_traj_input).type(
                    torch.FloatTensor)).cuda()

            # Store losses (for further analysis)
            curloss_r6 = Variable(self.args.scf * (torch.dist(pred_r6, r6)**2),
                                  requires_grad=False)
            curloss_xyzq = Variable(self.args.scf *
                                    (torch.dist(abs_traj_input, xyzq)**2),
                                    requires_grad=False)
            curloss_xyzq_trans = Variable(
                self.args.scf * 10 *
                (torch.dist(abs_traj_input[:, :, :3], xyzq[:, :, :3])**2),
                requires_grad=False)
            curloss_xyzq_rot = Variable(torch.dist(abs_traj_input[:, :, 3:],
                                                   xyzq[:, :, 3:])**2,
                                        requires_grad=False)

            curloss_r6 = curloss_r6.detach().cpu().numpy()
            curloss_xyzq = curloss_xyzq.detach().cpu().numpy()
            curloss_xyzq_rot = curloss_xyzq_rot.detach().cpu().numpy()
            curloss_xyzq_trans = curloss_xyzq_trans.detach().cpu().numpy()

            r6Losses.append(curloss_r6)
            r6Loss_seq.append(curloss_r6)
            poseLosses.append(curloss_xyzq_rot + curloss_xyzq_trans)
            poseLoss_seq.append(curloss_xyzq_rot + curloss_xyzq_trans)
            totalLosses.append(curloss_r6 + curloss_xyzq_rot +
                               curloss_xyzq_trans)
            totalLoss_seq.append(curloss_r6 + curloss_xyzq_rot +
                                 curloss_xyzq_trans)
            del curloss_r6
            del curloss_xyzq
            # Detach hidden states and outputs of LSTM
            # self.model.detach_LSTM_hidden()

            if endOfSeq is True:
                # Print stats BEFORE clearing the per-sequence accumulators.
                # The original cleared the lists first, so this line always
                # printed nan (mean of an empty list).
                tqdm.write('Total Loss: ' + str(np.mean(totalLoss_seq)),
                           file=sys.stdout)

                r6Loss_seq = []
                poseLoss_seq = []
                totalLoss_seq = []

                # Write predicted trajectory to file
                saveFile = os.path.join(self.args.expDir, 'plots', 'traj', str(seq).zfill(2), \
                                        'traj_' + str(self.curEpoch).zfill(3) + '.txt')
                # TODO: figure out why only one trajectory gets saved and why
                # the last save looks odd (short, and only 6 columns).
                # (translated from Korean)
                np.savetxt(saveFile, traj_pred, newline='\n')

                # Reset variable, to store new trajectory later on
                traj_pred = None

                # Detach LSTM hidden states
                # self.model.detach_LSTM_hidden()

                # Reset LSTM hidden states
                # self.model.reset_LSTM_hidden()
                # Reset the integrated pose so the next sequence re-seeds it.
                self.abs_traj = None
                self.model.zero_grad()

        return r6Losses, poseLosses, totalLosses