Code Example #1
    def _load_data_set(self):
        self.images = []
        self.kp2ds = []
        self.boxs = []
        clk = Clock()
        print('start loading coco 2017 dataset.')
        #anno_file_path = os.path.join(self.data_folder, 'annotations', 'person_keypoints_train2017.json')
        anno_file_path = os.path.join(self.data_folder, 'annotations',
                                      'person_keypoints_val2017.json')

        with open(anno_file_path, 'r') as reader:
            anno = json.load(reader)

        def _hash_image_id_(image_id_to_info, coco_images_info):
            # Build a map from COCO image id to per-image metadata
            # (image path plus empty keypoint and box lists).
            for image_info in coco_images_info:
                image_id = image_info['id']
                image_name = image_info['file_name']
                _anno = {}
                #_anno['image_path'] = os.path.join(self.data_folder, 'images', 'train-valid2017', image_name)
                #_anno['image_path'] = os.path.join(self.data_folder, 'images', 'train2017', image_name)
                _anno['image_path'] = os.path.join(self.data_folder, 'images',
                                                   'val2017', image_name)

                _anno['kps'] = []
                _anno['box'] = []
                assert image_id not in image_id_to_info  # every image id appears only once
                image_id_to_info[image_id] = _anno

        images = anno['images']

        image_id_to_info = {}
        _hash_image_id_(image_id_to_info, images)

        annos = anno['annotations']
        # Dispatch each annotation to the handler together with the id map.
        for anno_info in annos:
            self._handle_anno_info(anno_info, image_id_to_info)

        # Hand each image's collected info to the per-image handler.
        for v in image_id_to_info.values():
            self._handle_image_info_(v)

        print('finished load coco 2017 dataset, total {} samples.'.format(
            len(self.images)))

        clk.stop()
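
The snippet above relies on a Clock helper that is not shown on this page. A minimal stopwatch-style sketch that would satisfy the Clock() / clk.stop() calls is given below; the class body is an assumption for illustration, not the project's actual implementation.

import time

class Clock:
    """Hypothetical stopwatch: starts timing on construction, reports elapsed time on stop()."""
    def __init__(self):
        self._start = time.time()

    def stop(self):
        elapsed = time.time() - self._start
        print('elapsed {:.2f} s'.format(elapsed))
        return elapsed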
Code Example #2
from multiprocessing import Process, Array
from time import sleep
from timer import Clock

if __name__ == '__main__':
    timer = Clock()
    time = Array("i", 2)  # two shared ints, readable from both processes

    # Run the clock in a child process so it keeps ticking while the parent sleeps.
    clocker = Process(target=timer.increment, args=(time,))
    clocker.start()
    sleep(4)
    print(str(time[0]).zfill(2) + ":" + str(time[1]).zfill(2))  # zero-padded NN:NN
    clocker.join()  # blocks until increment() returns
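
The timer module imported above is not shown. A minimal sketch of a Clock.increment compatible with that call could tick once per second and write minutes and seconds into the shared array; the names and the once-per-second behaviour are assumptions, not the module's documented API.

from time import sleep

class Clock:
    """Hypothetical ticking clock: writes [minutes, seconds] into a shared array every second."""
    def increment(self, shared):
        seconds = 0
        while True:                     # runs until the parent process terminates it
            sleep(1)
            seconds += 1
            shared[0] = seconds // 60   # minutes
            shared[1] = seconds % 60    # seconds

With an endless loop like this, the clocker.join() in the example would block forever; a parent that wants to exit cleanly would call clocker.terminate() first or start the process as a daemon.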
Code Example #3
File: train.py  Project: xspin/pda
        tgt_dis_labels = torch.ones(config.batch_size, dtype=torch.long)

    print('Initializing network ...')
    if config.is_cuda:
        net = Net(config).cuda()
    else:
        net = Net(config)
    criterion_label = torch.nn.CrossEntropyLoss()
    criterion_domain = torch.nn.CrossEntropyLoss()
    # optimizer_F = torch.optim.SGD(net.feature_extractor.parameters(), lr=0.001, momentum=0.9)
    # optimizer_D = torch.optim.SGD(net.adversarial_classifier.parameters(), lr=0.001, momentum=0.9)
    optimizer_F = torch.optim.Adam(net.feature_extractor.parameters())
    optimizer_D = torch.optim.Adam(net.adversarial_classifier.parameters())

    print('Starting training ...')
    clock_epoch = Clock(config.epochs)
    for epoch in range(config.epochs):
        print(' Epoch {}/{}'.format(epoch + 1, config.epochs))
        step = 0
        clock_batch = Clock(len(src_dataloader))
        for src_data, tgt_data in zip(src_dataloader, cycle(tgt_dataloader)):
            print('    Batch {}/{}'.format(step + 1, len(src_dataloader)))
            src_inputs, src_labels = src_data
            tgt_inputs, tgt_labels = tgt_data
            # src_inputs = torch.autograd.Variable(src_inputs)
            # tgt_inputs = torch.autograd.Variable(tgt_inputs)
            if config.is_cuda:
                src_inputs, src_labels = src_inputs.cuda(), src_labels.cuda()
                tgt_inputs, tgt_labels = tgt_inputs.cuda(), tgt_labels.cuda()
            else:
                # CPU path: the tensors are used as-is (this assignment is a no-op)
                src_inputs, src_labels = src_inputs, src_labels
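
The pairing of the two dataloaders with zip(src_dataloader, cycle(tgt_dataloader)) reuses target batches whenever the target set is shorter than the source set. A self-contained illustration of the same pattern follows; the lists merely stand in for the dataloaders and are not part of the project.

from itertools import cycle

src_batches = ['s0', 's1', 's2', 's3', 's4']   # stand-in for src_dataloader
tgt_batches = ['t0', 't1']                     # stand-in for tgt_dataloader

# zip stops when the source list is exhausted; cycle() repeats the target list indefinitely.
for src, tgt in zip(src_batches, cycle(tgt_batches)):
    print(src, tgt)
# prints: s0 t0, s1 t1, s2 t0, s3 t1, s4 t0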