    def initModelOptimizer(self, args):

        print('Initializing models & optimizer... \r', end='')
        self.best = 1e6
        self.best_rmse = 1e6
        self.best_mae = 1e6
        self.best_rmse_index = 0
        self.best_mae_index = 0
        self.best_index = 0

        with utility.Timer() as t:
            arch = args['model']['arch']  # avoid shadowing the builtin `str`
            inch = args['data']['inChannel']

            model = models.__dict__[arch](inch, inch)

            # print(model)
            # model = models.__dict__[args['model']['arch']]((50, 64), 100, [100], [(3, 3)], 1)

            model.apply(self.weights_init)
            model = torch.nn.DataParallel(model).cuda()

            if args['model']['optimizer'] in ['Adam', 'SGD', 'RMSprop']:
                optimizer = torch.optim.__dict__[args['model']['optimizer']](
                    # filter(lambda p: p.requires_grad, model.parameters()),
                    params=model.parameters(),
                    lr=args['model']['learningRate'],
                    weight_decay=args['model']['weightDecay'])
            else:
                # Fail fast instead of hitting a NameError on `optimizer`
                # at the return statement below.
                raise ValueError(
                    'Unsupported optimizer: %s' % args['model']['optimizer'])

            self.cs = nn.BCELoss(reduction='sum').cuda()
            self.l2 = nn.MSELoss(reduction='sum').cuda()
            self.l1 = nn.L1Loss(reduction='sum').cuda()
        print('Model [%s] & Optimizer [%s] initialized. %.2fs' % (
            args['model']['arch'], args['model']['optimizer'], t.interval))
        return model, optimizer
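
The `utility.Timer` class itself does not appear in this listing. For the
context-manager usage in this example (`with utility.Timer() as t:` followed
by reading `t.interval`), a minimal sketch that satisfies the call sites could
look like the following; the class name, the context-manager protocol, and the
`interval` attribute are taken from the example, everything else is an
assumption:

import time


class Timer:
    """Minimal context-manager timer; `interval` holds elapsed seconds."""

    def __enter__(self):
        self.start = time.perf_counter()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Record wall-clock time spent inside the `with` block.
        self.interval = time.perf_counter() - self.start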
Example #2
    def train(self):
        # self.args.cpu = False

        self.loss.step()
        epoch = self.optimizer.get_last_epoch() + 1
        lr = self.optimizer.get_lr()

        self.ckp.write_log('[Epoch {}]\tLearning rate: {:.2e}'.format(
            epoch, Decimal(lr)))
        self.loss.start_log()
        self.model.train()

        timer_data, timer_model = utility.Timer(), utility.Timer()

        self.loader_train.dataset.set_scale(0)
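        # NOTE: the loop below rebinds `lr` to the low-resolution batch,
        # shadowing the learning rate read at the top of this method.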
        for batch, (lr, hr, _) in enumerate(self.loader_train):
            lr, hr = self.prepare(lr, hr)
            timer_data.hold()
            timer_model.tic()

            self.optimizer.zero_grad()
            sr = self.model(lr, 0)
            loss = self.loss(sr, hr)
            loss.backward()
            if self.args.gclip > 0:
                utils.clip_grad_value_(self.model.parameters(),
                                       self.args.gclip)
            self.optimizer.step()

            timer_model.hold()

            if (batch + 1) % self.args.print_every == 0:
                self.ckp.write_log('[{}/{}]\t{}\t{:.1f}+{:.1f}s'.format(
                    (batch + 1) * self.args.batch_size,
                    len(self.loader_train.dataset),
                    self.loss.display_loss(batch), timer_model.release(),
                    timer_data.release()))

            timer_data.tic()

        self.loss.end_log(len(self.loader_train))
        self.error_last = self.loss.log[-1, -1]
        self.optimizer.schedule()
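
Example #2 drives two timers with `tic()`, `hold()`, and `release()`, and
Example #5 below also calls `toc()`. Below is a sketch of an accumulating
stopwatch consistent with those call sites; the actual `utility.Timer`
implementation may differ:

import time


class Timer:
    """Stopwatch sketch: tic/toc measure one span; hold/release accumulate
    spans across loop iterations and return the running total."""

    def __init__(self):
        self.acc = 0
        self.tic()

    def tic(self):
        # Start (or restart) the current span.
        self.t0 = time.time()

    def toc(self):
        # Seconds since the last tic().
        return time.time() - self.t0

    def hold(self):
        # Bank the current span into the accumulator.
        self.acc += self.toc()

    def release(self):
        # Return the accumulated time and reset the accumulator.
        ret = self.acc
        self.acc = 0
        return ret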
Example #3
def trainRibTracerDDPG(params):
    trainDataset = data.RibTraceDDPGDataset(params.data_dir, params.addi_path,
                                            params, True)
    valDataset = data.RibTraceDDPGDataset(params.val_data_set,
                                          params.val_addi_path, params)

    ribTracer = ddpg.RibTracerDDPG(params)
    if params.continueModel != "None":
        ribTracer.loadWeights()
    if params.useGPU:
        ribTracer = ribTracer.cuda()

    timer = utility.Timer()
    for epochID in range(1, params.numEpochs + 1):
        total_reward = 0
        total_len = 0
        cnt = 0
        ribTracer.train()
        for img, poly in trainDataset:
            reward, track = ribTracer.play(img, poly, poly[1] - poly[0], True)
            if cnt % 20 == 0:
                # Average the accumulated value / policy losses per update.
                vloss = ribTracer.total_value_loss.cpu().numpy() / ribTracer.update_cnt
                ploss = ribTracer.total_policy_loss.cpu().numpy() / ribTracer.update_cnt
                print(
                    f"Batch {cnt}: Reward {reward} Len {len(track)} "
                    f"VLoss {vloss} PLoss {ploss} {timer()}")
            total_reward += reward
            total_len += len(track)
            cnt += 1
        ribTracer.eval()
        ribTracer.saveWeights()
        test_reward = 0
        test_len = 0
        test_cnt = 0
        for img, poly in valDataset:
            reward, track = ribTracer.play(img, poly, poly[1] - poly[0], False)
            test_reward += reward
            test_len += len(track)
            test_cnt += 1
        print(f"Epoch {epochID}: {timer()}")
        print(f"Train: {total_reward / cnt} {total_len/cnt}")
        print(f"Val: {test_reward / test_cnt} {test_len/test_cnt}")
Example #4
def take_to_bob_text(prefix):
    timer = utility.Timer()

    logger.info('loading take from "{prefix}": {delta:.4f}'.format(
        prefix=prefix, delta=timer.elapsed()))

    with open('{}/data.mStream'.format(prefix), 'rb') as f:
        info, node_list, data = shadow.fileio.read_stream(f)

    logger.info('read take stream: {delta:.4f}'.format(delta=timer.elapsed()))

    #
    # Read the mTake file to get the node string ids. Create a name
    # mapping for all of the nodes and the data fields that are present in the
    # take data stream. For example, create something like:
    #
    #   node_map['Hips']['Gq'] = (0, 4)
    #
    # Which can be used to index into the big array of data loaded from the
    # take.
    #
    with open('{}/take.mTake'.format(prefix)) as f:
        node_map = shadow.fileio.make_node_map(f, node_list)

    logger.info('create named data base and bounds: {delta:.4f}'.format(
        delta=timer.elapsed()))

    #
    # The take stream data is arranged as a flat 1D array.
    #   [ax0 ay0 az0 ... axN ayN azN]
    # Use the reshape function access as 2D array with rows as time.
    #   [[ax0 ay0 az0],
    #    ...
    #    [axN ayN azN]]
    #
    # frame_stride is presumably in bytes; divide by 4 to get the number of
    # 32-bit values per frame.
    stride = int(info['frame_stride'] / 4)
    num_frame = int(info['num_frame'])

    y = np.reshape(np.array(data), (num_frame, stride))

    # Time in seconds, one sample per frame. An integer range scaled by the
    # timestep sidesteps the off-by-one and float-step pitfalls of
    # np.arange(0, (num_frame - 1) * h, h).
    h = info.get('h', 0.01)
    x = np.arange(num_frame) * h
    # x = y[:, range(*node_map['Body']['timestamp'])].flatten()
    # x = x - x[0]

    logger.info('copy data to numpy.array x and y: {delta:.4f}'.format(
        delta=timer.elapsed()))

    # Name mapping from Shadow to BoB joint
    shadow_to_bob = {
        'Hips': 'pelvis',
        'Chest': 'lumbar_joint',  # thorax_l1
        'Head': 'neck_joint',  # skull_c1
        'LeftThigh': 'left_hip',
        'LeftLeg': 'left_knee',
        'LeftFoot': 'left_ankle',
        'LeftShoulder': 'left_sc_joint',
        'LeftArm': 'left_shoulder',
        'LeftForearm': 'left_elbow',
        'LeftHand': 'left_wrist',
        'RightThigh': 'right_hip',
        'RightLeg': 'right_knee',
        'RightFoot': 'right_ankle',
        'RightShoulder': 'right_sc_joint',
        'RightArm': 'right_shoulder',
        'RightForearm': 'right_elbow',
        'RightHand': 'right_wrist'
    }

    # Rotation of each BoB joint in the skeleton definition. Used to change
    # the rotation coordinate system of the Shadow data as we copy it to the
    # BoB skeleton.
    root_2_over_2 = np.sqrt(2) / 2
    pre_rotate = {
        'LeftLeg': [0, 1, 0, 0],
        'LeftFoot': [root_2_over_2, root_2_over_2, 0, 0],
        'RightLeg': [0, 1, 0, 0],
        'RightFoot': [root_2_over_2, root_2_over_2, 0, 0],
        'Chest': [0.9588, -0.2840, 0, 0],
        'Head': [0.9981, 0.0610, 0, 0],
        'LeftShoulder': [0.2179, -0.6727, -0.6942, 0.1342],
        'LeftArm': [0.0062, 0.7071, 0.7071, -0.0062],
        'LeftForearm': [0.0062, 0.7071, 0.7071, -0.0062],
        'LeftHand': [0.0062, 0.7071, 0.7071, -0.0062],
        'RightShoulder': [0.6942, -0.1342, -0.2179, 0.6727],
        'RightArm': [0.0062, 0.7071, -0.7071, 0.0062],
        'RightForearm': [0.0062, 0.7071, -0.7071, 0.0062],
        'RightHand': [0.0062, 0.7071, -0.7071, 0.0062]
    }

    # Name and index ordering to convert take data to BoB text format.
    channel_order = {'x': 2, 'y': 0, 'z': 1}

    # Store data as named channels.
    # data['pelvis']['rotx'] = {...}
    data = {}

    for key in shadow_to_bob:
        name = shadow_to_bob[key]
        joint = {}

        if key in pre_rotate:
            pre = quaternion.as_quat_array(
                np.full((num_frame, 4), pre_rotate[key]))

            # Local quaternion. In the joint coordinate frame.
            Lq = quaternion.as_quat_array(y[:, range(*node_map[key]['Lq'])])
            # Change rotation frame to match BoB skeleton.
            Lq = pre * Lq * pre.conjugate()
            # Convert to X-Y-Z Euler angle set.
            rot = np.rad2deg(quaternion_to_euler(Lq))
        else:
            # No need to change coordinate frame. Use the X-Y-Z Euler angle set
            # directly from the Shadow skeleton.
            rot = np.rad2deg(y[:, range(*node_map[key]['r'])])

        for channel_key in channel_order:
            channel = 'rot{}'.format(channel_key)
            joint[channel] = rot[:, channel_order[channel_key]]

        if key == 'Hips':
            # World space translation. Shadow is in cm, BoB in m.
            trans = y[:, range(*node_map[key]['c'])] * 0.01
            for channel_key in channel_order:
                channel = 'trans{}'.format(channel_key)
                joint[channel] = trans[:, 1 + channel_order[channel_key]]

        data[name] = joint

    logger.info('convert rotate and translate data: {delta:.4f}'.format(
        delta=timer.elapsed()))

    #
    # Write out data in plain text format.
    #
    time_str = ' '.join(map(str, x))
    with open('{}/data.txt'.format(prefix), 'w') as f:
        for name in data:
            f.write('% {}\n'.format(name))

            f.write('{}.time=[{}];\n'.format(name, time_str))

            for channel in data[name]:
                f.write('{}.{}=[{}];\n'.format(
                    name, channel, ' '.join(map(str, data[name][channel]))))

            f.write('\n')

    logger.info('total time: {delta:.4f}'.format(delta=timer.total()))
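
This example uses a third interface: repeated `timer.elapsed()` calls that
each log a per-stage delta, plus `timer.total()` at the end. Reading
`elapsed()` as a lap split is an inference from the log messages, not
confirmed by the source; under that assumption, a sketch:

import time


class Timer:
    """Lap-timer sketch: elapsed() returns seconds since the previous call
    to elapsed() (a lap split); total() returns seconds since construction."""

    def __init__(self):
        self.start = time.perf_counter()
        self.last = self.start

    def elapsed(self):
        now = time.perf_counter()
        delta = now - self.last
        self.last = now
        return delta

    def total(self):
        return time.perf_counter() - self.start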
Example #5
    def test(self):

        # self.args.cpu = True

        torch.set_grad_enabled(False)

        epoch = self.optimizer.get_last_epoch()

        self.ckp.write_log('\nEvaluation:')
        self.ckp.add_log(torch.zeros(1, len(self.loader_test),
                                     len(self.scale)))
        self.model.zero_grad()
        self.model.eval()

        timer_test = utility.Timer()
        if self.args.save_results:
            self.ckp.begin_background()

        for idx_data, d in enumerate(self.loader_test):
            for idx_scale, scale in enumerate(self.scale):
                ssim_total = 0
                psnr = 0
                d.dataset.set_scale(idx_scale)

                for lr, hr, filename in tqdm(d, ncols=80):
                    torch.cuda.empty_cache()
                    gc.collect()

                    lr, hr = self.prepare(lr, hr)

                    sr = self.model(lr, idx_scale)

                    if self.args.use_lab:
                        hr = colors.lab_to_rgb(hr) * self.args.rgb_range
                        lr = colors.lab_to_rgb(lr) * self.args.rgb_range
                        sr = colors.lab_to_rgb(sr) * self.args.rgb_range

                    sr = utility.quantize(sr, self.args.rgb_range)

                    save_list = [sr]
                    # Compute PSNR once and reuse it for the checkpoint log
                    # and the per-dataset running average.
                    cur_psnr = utility.calc_psnr(
                        sr, hr, scale, self.args.rgb_range, dataset=d)
                    self.ckp.log[-1, idx_data, idx_scale] += cur_psnr
                    psnr += cur_psnr
                    ssim_total += utility.calc_ssim(
                        sr, hr, self.args.rgb_range).item()

                    if self.args.save_gt:
                        save_list.extend([lr, hr])

                    if self.args.save_results:
                        self.ckp.save_results(d, filename[0], save_list, scale)

                self.ckp.log[-1, idx_data, idx_scale] /= len(d)
                psnr /= len(d)
                best = self.ckp.log.max(0)
                """"
                self.ckp.write_log(
                    '[{} x{}]\tPSNR: {:.3f} (Best: {:.3f} @epoch {})'.format(
                        d.dataset.name,
                        scale,
                        self.ckp.log[-1, idx_data, idx_scale],
                        best[0][idx_data, idx_scale],
                        best[1][idx_data, idx_scale] + 1
                    )
                )
                """
                ssim_val = ssim_total / len(d)
                self.ckp.write_log('[{} x{}]\tPSNR: {:.3f}\tSSIM: {:.3f}'.format(
                    d.dataset.name, scale, psnr, ssim_val))

        self.ckp.write_log('Forward: {:.2f}s\n'.format(timer_test.toc()))
        self.ckp.write_log('Saving...')

        if self.args.save_results:
            self.ckp.end_background()

        if not self.args.test_only:
            self.ckp.save(self, epoch, is_best=(best[1][0, 0] + 1 == epoch))

        self.ckp.write_log('Total: {:.2f}s\n'.format(timer_test.toc()),
                           refresh=True)

        torch.set_grad_enabled(True)
Example #6
def trainRibTracer(params):
    trainDataset = data.RibTraceDataset(params.data_dir, params.addi_path,
                                        params, True)
    valDataset = data.RibTraceDataset(params.val_data_set,
                                      params.val_addi_path, params)

    ribTracer = model.RibTracer(params)
    if params.continueModel != "None":
        d = torch.load(params.continueModel)
        ribTracer.load_state_dict(d)
    if params.useGPU:
        ribTracer = ribTracer.cuda()

    loss_fn = torch.nn.L1Loss()
    optimizer = torch.optim.Adam(ribTracer.parameters(),
                                 lr=params.learningRate)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                           verbose=True)

    def calcLoss(dataset):
        total = 0
        cnt = 0
        for batch, label in DataLoader(dataset,
                                       batch_size=params.batchSize,
                                       pin_memory=True,
                                       num_workers=2):
            if params.useGPU:
                batch = batch.cuda()
                label = label.cuda()
            with torch.no_grad():
                out = ribTracer(batch)
                loss = loss_fn(out, label)
            batch_loss = torch.sum(loss).cpu()
            total += batch_loss
            cnt += 1

        return float(total) / float(cnt)

    train_loss_rec = []
    val_loss_rec = []
    timer = utility.Timer()
    # torch.autograd.set_detect_anomaly(True)
    for epochID in range(1, params.numEpochs + 1):
        print(f"Start Epoch {epochID}")
        cnt = 0
        ribTracer.train()
        for batch, label in DataLoader(trainDataset,
                                       batch_size=params.batchSize,
                                       pin_memory=True,
                                       num_workers=2):
            optimizer.zero_grad()
            if params.useGPU:
                batch = batch.cuda()
                label = label.cuda()
            out = ribTracer(batch)
            loss = loss_fn(out, label)
            loss.backward()
            optimizer.step()

            if cnt % 100 == 0:
                print(f"Batch {cnt}: {loss} {timer()}")
            cnt += 1

        ribTracer.eval()
        trainloss = calcLoss(trainDataset)
        valloss = calcLoss(valDataset)
        train_loss_rec.append(trainloss)
        val_loss_rec.append(valloss)
        scheduler.step(trainloss)

        torch.save(ribTracer.state_dict(),
                   os.path.join(params.model_path, "ribTracer.pt"))

        print(f"Epoch {epochID}: {timer()} Train: {trainloss} Val: {valloss}")
Example #7
def trainVAE(params):
    trainDataset = data.VAEDataset(params.data_dir, params, True)
    valDataset = data.VAEDataset(params.val_data_set, params, True)

    VAEmodel = model.VAE(params)
    if params.useGPU:
        VAEmodel = VAEmodel.cuda()
    loss_fn = torch.nn.SmoothL1Loss()
    optimizer = torch.optim.Adam(VAEmodel.parameters(), lr=params.learningRate)

    def calcLoss(dataset):
        total = 0
        cnt = 0
        for batch in DataLoader(dataset,
                                batch_size=params.batchSize,
                                pin_memory=True,
                                num_workers=4):
            if params.useGPU:
                batch = batch.cuda()
            with torch.no_grad():
                out = VAEmodel(batch)
                loss = loss_fn(out, batch)
            batch_loss = torch.sum(loss).cpu()
            total += batch_loss
            cnt += 1

        return float(total) / float(cnt)

    timer = utility.Timer()
    # torch.autograd.set_detect_anomaly(True)
    for epochID in range(1, params.numEpochs + 1):
        print(f"Start Epoch {epochID}")
        cnt = 0
        VAEmodel.train()
        trainloss = 0
        traintotal = 0
        for batch in DataLoader(trainDataset,
                                batch_size=params.batchSize,
                                pin_memory=True,
                                num_workers=4):
            optimizer.zero_grad()
            if params.useGPU:
                batch = batch.cuda()
            out = VAEmodel(batch)
            loss = loss_fn(out, batch)
            loss.backward()
            trainloss += loss.detach().cpu().numpy()
            traintotal += 1
            optimizer.step()

            if cnt % 100 == 0:
                print(f"Batch {cnt}: {loss} {timer()}")
            cnt += 1

        VAEmodel.eval()
        trainloss /= traintotal
        # trainloss = calcLoss(trainDataset)
        valloss = calcLoss(valDataset)

        torch.save(VAEmodel.state_dict(),
                   os.path.join(params.model_path, "VAE.pt"))
        torch.save(VAEmodel.conv.state_dict(),
                   os.path.join(params.model_path, "ribTracerObserver.pt"))

        print(f"Epoch {epochID}: {timer()} Train: {trainloss} Val: {valloss}")