示例#1
0
    def __init__(self, opt):
        """Initialise the base class, then attach the test-split data loader."""
        super(MorphTestData, self).__init__(opt)

        # Imported lazily so this module can be loaded without pulling in
        # the data package at import time.
        from data.custom_dataset_data_loader import CustomDatasetDataLoader
        loader = CustomDatasetDataLoader(self._opt, is_for_train=False)
        self.data_loader_test = loader.load_data()
示例#2
0
    def __init__(self):
        """Parse options, build the train/test datasets, model and
        TensorBoard visualizer, then kick off training."""
        self._opt = TrainOptions().parse()

        train_loader = CustomDatasetDataLoader(self._opt, is_for_train=True)
        test_loader = CustomDatasetDataLoader(self._opt, is_for_train=False)

        self._dataset_train = train_loader.load_data()
        self._dataset_test = test_loader.load_data()

        # Sizes are reported once up front for sanity checking.
        self._dataset_train_size = len(train_loader)
        self._dataset_test_size = len(test_loader)
        print('#train images = %d' % self._dataset_train_size)
        print('#test images = %d' % self._dataset_test_size)

        self._model = ModelsFactory.get_by_name(self._opt.model, self._opt)
        self._tb_visualizer = TBVisualizer(self._opt)

        self._train()
示例#3
0
    def __init__(self):
        """Set up datasets, model and visualizer, then start the training loop."""
        self._opt = TrainOptions().parse()

        # One loader per split; construction order (train, then test) is kept.
        loader_train = CustomDatasetDataLoader(self._opt, is_for_train=True)
        loader_test = CustomDatasetDataLoader(self._opt, is_for_train=False)

        self._dataset_train, self._dataset_test = (
            loader_train.load_data(), loader_test.load_data())

        self._dataset_train_size, self._dataset_test_size = (
            len(loader_train), len(loader_test))
        print('#train images = %d' % self._dataset_train_size)
        print('#test images = %d' % self._dataset_test_size)

        self._model = ModelsFactory.get_by_name(self._opt.model, self._opt)
        self._tb_visualizer = TBVisualizer(self._opt)

        self._train()
示例#4
0
    def __init__(self):
        """Build train/test video-clip loaders, the Impersonator model and a
        TensorBoard visualizer, then launch training."""
        self._opt = TrainOptions().parse()

        loader_train = CustomDatasetDataLoader(self._opt, is_for_train=True)
        loader_test = CustomDatasetDataLoader(self._opt, is_for_train=False)

        self._dataset_train = loader_train.load_data()
        self._dataset_test = loader_test.load_data()

        self._dataset_train_size = len(loader_train)
        self._dataset_test_size = len(loader_test)
        print('#train video clips = %d' % self._dataset_train_size)
        print('#test video clips = %d' % self._dataset_test_size)

        self._model = Impersonator(self._opt)
        self._tb_visualizer = TBVisualizer(self._opt)

        self._train()
示例#5
0
    def __init__(self):
        """Parse training options, construct the model/visualizer and the
        train/val data pipelines, then run training."""
        self._opt = TrainOptions().parse()

        # Note: model and visualizer are created before the data loaders here.
        self._model = ModelsFactory.get_by_name(self._opt.model, self._opt)
        self._tb_visualizer = TBVisualizer(self._opt)

        loaders = {mode: CustomDatasetDataLoader(self._opt, mode=mode)
                   for mode in ('train', 'val')}

        self._dataset_train = loaders['train'].load_data()
        self._dataset_val = loaders['val'].load_data()

        self._dataset_train_size = len(loaders['train'])
        self._dataset_val_size = len(loaders['val'])
        print('#train images = %d' % self._dataset_train_size)
        print('#val images = %d' % self._dataset_val_size)

        self._train()
示例#6
0
    def __init__(self):
        """Prepare datasets, model, visualizers and empty image-accumulation
        buffers, then start training."""
        self._opt = TrainOptions().parse()
        loader_train = CustomDatasetDataLoader(self._opt, is_for_train=True)
        loader_test = CustomDatasetDataLoader(self._opt, is_for_train=False)

        self._dataset_train = loader_train.load_data()
        self._dataset_test = loader_test.load_data()

        self._dataset_train_size = len(loader_train)
        self._dataset_test_size = len(loader_test)
        print('#train images = %d' % self._dataset_train_size)
        print('#test images = %d' % self._dataset_test_size)
        print('TRAIN IMAGES FOLDER = %s' % loader_train._dataset._imgs_dir)
        print('TEST IMAGES FOLDER = %s' % loader_test._dataset._imgs_dir)

        self._model = ModelsFactory.get_by_name(self._opt.model, self._opt)
        self._tb_visualizer = TBVisualizer(self._opt)
        self._writer = SummaryWriter()

        # Every accumulation buffer starts empty with shape (0, 3, H, W);
        # images are presumably concatenated onto dim 0 later — TODO confirm.
        buf_shape = (0, 3, self._opt.image_size, self._opt.image_size)
        self._input_imgs = torch.empty(*buf_shape)
        self._fake_imgs = torch.empty(*buf_shape)
        self._rec_real_imgs = torch.empty(*buf_shape)
        self._fake_imgs_unmasked = torch.empty(*buf_shape)
        self._fake_imgs_mask = torch.empty(*buf_shape)
        self._rec_real_imgs_mask = torch.empty(*buf_shape)
        self._cyc_imgs_unmasked = torch.empty(*buf_shape)
        self._real_conds = []
        self._desired_conds = []

        self._train()
示例#7
0
#!/usr/bin/env python
# coding=utf-8
"""Training entry-point script: build the training data loader, model and
visualizer, then iterate over epochs and batches (the loop body continues
beyond this chunk)."""
import time
from options.train_options import TrainOptions  # finish
from data.custom_dataset_data_loader import CustomDatasetDataLoader  # finish
from models.models import create_model  # finish
from util.visualizer import Visualizer  # finish
from util.recorder import Recorder  # finish

# Parse CLI options and build the training loader/dataset.
opt = TrainOptions().parse()
train_loader = CustomDatasetDataLoader(opt, 'train')
#val_loader    = CustomDatasetDataLoader(opt, 'val')
train_dataset = train_loader.load_data()
#val_dataset   = val_loader.load_data()

dataset_size = len(train_loader)
print(('#training images = %d' % dataset_size))

model = create_model(opt)
visualizer = Visualizer(opt)
recorder = Recorder()
total_steps = 0  # global batch counter across all epochs

# Epoch loop; `schedule_max` is the last epoch (inclusive, hence the +1).
for epoch in range(opt.epoch_count, opt.schedule_max + 1):
    epoch_start_time = time.time()
    epoch_iter = 0  # samples processed so far in this epoch

    for i, data in enumerate(train_dataset):
        iter_start_time = time.time()
        # Both counters advance by the configured batch size each step.
        total_steps += opt.batchSize
        epoch_iter += opt.batchSize
示例#8
0
    def __init__(self):
        """Set up a hand-model tester: per-finger rotation limits, per-finger
        mesh vertex-id groups, test options/data loader, the model and a
        TensorBoard visualizer, then run one test visualization pass.

        NOTE(review): the vertex index lists below look like MANO hand-mesh
        vertex ids (778-vertex template) — confirm against the model used.
        """

        # TO GET THEM:
        # clusters_pose_map, clusters_rot_map, clusters_root_rot = self.get_rot_map(self._model.clusters_tensor, torch.zeros((25, 3)).cuda())
        #for i in range(25):
        #    import matplotlib.pyplot
        #    from mpl_toolkits.mplot3d import Axes3D
        #    ax = matplotlib.pyplot.figure().add_subplot(111, projection='3d')
        #    #i = 0
        #    add_group_meshs(ax, cluster_verts[i].cpu().data.numpy(), hand_faces, c='b')
        #    cam_equal_aspect_3d(ax, cluster_verts[i].cpu().data.numpy())
        #    print(i)
        #    matplotlib.pyplot.pause(1)
        #    matplotlib.pyplot.close()

        # FINGER LIMIT ANGLE:
        # Axis-angle limit rotations per finger; the trailing "a:b" comments
        # presumably index into the pose parameter vector — TODO confirm.
        #self.limit_bigfinger = torch.FloatTensor([1.0222, 0.0996, 0.7302]) # 36:39
        #self.limit_bigfinger = torch.FloatTensor([1.2030, 0.12, 0.25]) # 36:39
        #self.limit_bigfinger = torch.FloatTensor([1.2, -0.4, 0.25]) # 36:39
        self.limit_bigfinger = torch.FloatTensor([1.2, -0.6, 0.25])  # 36:39
        self.limit_index = torch.FloatTensor([-0.0827, -0.4389, 1.5193])  # 0:3
        self.limit_middlefinger = torch.FloatTensor(
            [-2.9802e-08, -7.4506e-09, 1.4932e+00])  # 9:12
        self.limit_fourth = torch.FloatTensor([0.1505, 0.3769,
                                               1.5090])  # 27:30
        self.limit_small = torch.FloatTensor([-0.6235, 0.0275,
                                              1.0519])  # 18:21
        # Move the limit tensors to GPU when one is available.
        if torch.cuda.is_available():
            self.limit_bigfinger = self.limit_bigfinger.cuda()
            self.limit_index = self.limit_index.cuda()
            self.limit_middlefinger = self.limit_middlefinger.cuda()
            self.limit_fourth = self.limit_fourth.cuda()
            self.limit_small = self.limit_small.cuda()

        # Mesh vertex ids belonging to the thumb.
        self._bigfinger_vertices = [
            697, 698, 699, 700, 701, 702, 703, 704, 705, 706, 707, 708, 709,
            710, 711, 712, 713, 714, 715, 716, 717, 718, 719, 720, 721, 722,
            723, 724, 725, 726, 727, 728, 729, 730, 731, 732, 733, 734, 735,
            736, 737, 738, 739, 740, 741, 742, 743, 744, 745, 746, 747, 748,
            749, 750, 751, 752, 753, 754, 755, 756, 757, 758, 759, 760, 761,
            762, 763, 764, 765, 766, 767, 768
        ]

        # Mesh vertex ids belonging to the index finger.
        self._indexfinger_vertices = [
            46, 47, 48, 49, 56, 57, 58, 59, 86, 87, 133, 134, 155, 156, 164,
            165, 166, 167, 174, 175, 189, 194, 195, 212, 213, 221, 222, 223,
            224, 225, 226, 237, 238, 272, 273, 280, 281, 282, 283, 294, 295,
            296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308,
            309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321,
            322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 334,
            335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347,
            348, 349, 350, 351, 352, 353, 354, 355
        ]

        # Mesh vertex ids belonging to the middle finger.
        self._middlefinger_vertices = [
            356, 357, 358, 359, 360, 361, 362, 363, 364, 365, 366, 367, 372,
            373, 374, 375, 376, 377, 381, 382, 385, 386, 387, 388, 389, 390,
            391, 392, 393, 394, 395, 396, 397, 398, 400, 401, 402, 403, 404,
            405, 406, 407, 408, 409, 410, 411, 412, 413, 414, 415, 416, 417,
            418, 419, 420, 421, 422, 423, 424, 425, 426, 427, 428, 429, 430,
            431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443,
            444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456,
            457, 458, 459, 460, 461, 462, 463, 464, 465, 466, 467
        ]

        # Mesh vertex ids belonging to the ring (fourth) finger.
        self._fourthfinger_vertices = [
            468, 469, 470, 471, 472, 473, 474, 475, 476, 477, 478, 479, 482,
            483, 484, 485, 486, 487, 491, 492, 495, 496, 497, 498, 499, 500,
            501, 502, 503, 504, 505, 506, 507, 508, 511, 512, 513, 514, 515,
            516, 517, 518, 519, 520, 521, 522, 523, 524, 525, 526, 527, 528,
            529, 530, 531, 532, 533, 534, 535, 536, 537, 538, 539, 540, 541,
            542, 543, 544, 545, 546, 547, 548, 549, 550, 551, 552, 553, 554,
            555, 556, 557, 558, 559, 560, 561, 562, 563, 564, 565, 566, 567,
            568, 569, 570, 571, 572, 573, 574, 575, 576, 577, 578
        ]

        # Mesh vertex ids belonging to the little finger.
        self._smallfinger_vertices = [
            580, 581, 582, 583, 584, 585, 586, 587, 588, 589, 590, 591, 598,
            599, 600, 601, 602, 603, 609, 610, 613, 614, 615, 616, 617, 618,
            619, 620, 621, 622, 623, 624, 625, 626, 628, 629, 630, 631, 632,
            633, 634, 635, 636, 637, 638, 639, 640, 641, 642, 643, 644, 645,
            646, 647, 648, 649, 650, 651, 652, 653, 654, 655, 656, 657, 658,
            659, 660, 661, 662, 663, 664, 665, 666, 667, 668, 669, 670, 671,
            672, 673, 674, 675, 676, 677, 678, 679, 680, 681, 682, 683, 684,
            685, 686, 687, 688, 689, 690, 691, 692, 693, 694, 695
        ]

        self._opt = TestOptions().parse()
        #assert self._opt.load_epoch > 0, 'Use command --load_epoch to indicate the epoch you want to load - and choose a trained model'

        # Let's set batch size at 2 since we're only getting one image so far
        self._opt.batch_size = 1

        # Reuse the test thread count for the (unused) train loader setting.
        self._opt.n_threads_train = self._opt.n_threads_test
        data_loader_test = CustomDatasetDataLoader(self._opt, mode='test')
        self._dataset_test = data_loader_test.load_data()
        self._dataset_test_size = len(data_loader_test)
        print('#test images = %d' % self._dataset_test_size)

        self._model = ModelsFactory.get_by_name(self._opt.model, self._opt)
        self._tb_visualizer = TBVisualizer(self._opt)

        # Run a single visualization pass over the whole test set.
        self._total_steps = self._dataset_test_size
        self._display_visualizer_test(20, self._total_steps)
示例#9
0
from tqdm import tqdm
import numpy as np
import torch
from models.base_model import BaseModel


# Evaluation-time options: single worker, batch size 1, fixed sample order,
# no flip augmentation, and no dataset-size cap.
opt = TestOptions().parse()
opt.nThreads = 1
opt.batchSize = 1
opt.serial_batches = True
opt.no_flip = True
opt.isTrain = False
opt.max_dataset_size = float("inf")

data_loader = CustomDatasetDataLoader(opt)
dataset = data_loader.load_data()

model = BaseModel(opt)

# Per-sample metric accumulators (filled later in the loop, past this chunk).
L1s = []
SSIMs = []
with torch.no_grad():
    for idx, data in enumerate(tqdm(dataset)):
        # Sample ids look like '<model_id>_<rest>'; pair members must come
        # from the same model, so the first component is asserted equal.
        ida = data['id_a'][0].split('_')
        idb = data['id_b'][0].split('_')

        assert (ida[0] == idb[0])
        model_id = ida[0]
        ida = '_'.join(ida[1:])
        idb = '_'.join(idb[1:])
示例#10
0
def main_task():
    """Train a DeepLab segmentation model.

    Alternates a training pass over `dataset` with an evaluation pass over
    `dataset_val`, checkpointing the latest weights periodically and the
    best-MIoU weights after each evaluation. Resumes epoch/iteration
    counters and the best IoU from the checkpoint dir when
    `--continue_train` is set.
    """
    # define params
    opt = BaseOptions().parse()
    iter_path = os.path.join(opt.checkpoints_dir, 'iter.txt')
    ioupath_path = os.path.join(opt.checkpoints_dir, 'MIoU.txt')

    # Resume bookkeeping; fall back to a fresh start when the files are
    # missing or malformed. (Was a bare `except:`, which also swallowed
    # KeyboardInterrupt/SystemExit.)
    if opt.continue_train:
        try:
            start_epoch, epoch_iter = np.loadtxt(iter_path,
                                                 delimiter=',',
                                                 dtype=int)
        except (OSError, ValueError):
            start_epoch, epoch_iter = 1, 0
        try:
            best_iou = np.loadtxt(ioupath_path, dtype=float)
        except (OSError, ValueError):
            best_iou = 0.
    else:
        start_epoch, epoch_iter = 1, 0
        best_iou = 0.

    os.environ["CUDA_VISIBLE_DEVICES"] = str(opt.gpu_ids[0])

    # define data mode
    data_loader = CustomDatasetDataLoader()
    data_loader.initialize(opt)
    dataset, dataset_val = data_loader.load_data()
    dataset_size = len(dataset)

    # define model
    model = Deeplab_Solver(opt)
    total_steps = (start_epoch - 1) * dataset_size + epoch_iter

    print("starting training model......")

    for epoch in range(start_epoch, opt.nepochs):
        # Time the whole epoch. (Previously this was reset inside the batch
        # loop, so the end-of-epoch report only measured the last batch, and
        # it was undefined when the training split was empty.)
        epoch_start_time = time.time()
        if epoch != start_epoch:
            epoch_iter = epoch_iter % dataset_size

        # for train
        opt.isTrain = True
        model.model.train()
        for i, data in enumerate(dataset, start=epoch_iter):
            total_steps += opt.batchSize
            epoch_iter += opt.batchSize

            # forward and backward pass
            model.forward(data, isTrain=True)
            model.backward(total_steps,
                           opt.nepochs * dataset_size * opt.batchSize + 1)

            # save latest model
            if total_steps % opt.save_latest_freq == 0:
                print('saving the latest model (epoch %d, total_steps %d)' %
                      (epoch, total_steps))
                model.save('latest')
                np.savetxt(iter_path, (epoch, epoch_iter),
                           delimiter=',',
                           fmt='%d')

        # Early stop once the average training loss is small enough.
        if model.trainingavgloss < 0.010:
            break

        # for eval
        opt.isTrain = False
        model.model.eval()
        if dataset_val is not None:
            label_trues, labels_preds = [], []
            for i, data in enumerate(dataset_val):
                seggt, segpred = model.forward(data, isTrain=False)
                label_trues.append(seggt.data.cpu().numpy())
                labels_preds.append(segpred.data.cpu().numpy())

            # metrics: accuracy, class accuracy, mean IoU, fw-avg accuracy.
            metrics = util.label_accuracy_score(label_trues,
                                                labels_preds,
                                                n_class=opt.label_nc)
            metrics *= 100
            print('''\
                    Validation:
                    Accuracy: {0}
                    AccuracyClass: {1}
                    MeanIOU: {2}
                    FWAVAccuracy: {3}
                    '''.format(*metrics))

            # save model for best
            if metrics[2] > best_iou:
                best_iou = metrics[2]
                model.save('best')

            print('end of epoch %d / %d \t Time Taken: %d sec' %
                  (epoch + 1, opt.nepochs, time.time() - epoch_start_time))
示例#11
0
from options.train_options import TrainOptions
from data.custom_dataset_data_loader import CustomDatasetDataLoader
from util.visualizer import Visualizer
import copy
from tqdm import tqdm
import numpy as np
import torch
from models.base_model import BaseModel


# Fix the torch RNG seed so runs are reproducible.
torch.manual_seed(0)

opt = TrainOptions().parse()

data_loader = CustomDatasetDataLoader(opt)
dataset = data_loader.load_data()


# Clone the options for validation: same configuration, but eval mode and a
# capped dataset size so evaluation stays cheap.
opt_for_eval = copy.deepcopy(opt)
opt_for_eval.isTrain = False
opt_for_eval.max_dataset_size = 1000
val_loader = CustomDatasetDataLoader(opt_for_eval)
valset = val_loader.load_data()

dataset_size = len(data_loader)
print('#training samples = %d' % dataset_size)

model = BaseModel(opt)

visualizer = Visualizer(opt)
total_steps = 0  # global step counter (advanced in the training loop below)