def make_data(data, labels, output_dir, prefix):
    image_dir = prefix + '_images/'
    mkdir_if_missing(osp.join(output_dir, image_dir))
    file_label_list = []
    num = len(data)
    for i in range(num):
        img = np.rollaxis(data[i, :].reshape((3, 32, 32)), 0, 3)
        filename = '{:05d}.jpg'.format(i)
        imsave(osp.join(output_dir, image_dir, filename), img)
        file_label_list.append('{} {}'.format(
            osp.join(image_dir, filename), int(labels[i])))
    write_list(file_label_list, osp.join(output_dir, prefix + '.txt'))
def main(args):
    mkdir_if_missing(args.output_dir)
    # training data
    data = []
    labels = []
    for i in range(1, 6):
        dic = unpickle(osp.join(args.data_root, 'data_batch_{}'.format(i)))
        data.append(dic['data'])
        labels = np.r_[labels, dic['labels']]
    data = np.vstack(data)
    make_data(data, labels, args.output_dir, 'train')
    # test data
    dic = unpickle(osp.join(args.data_root, 'test_batch'))
    make_data(dic['data'], dic['labels'], args.output_dir, 'test')
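All of the examples on this page assume a small mkdir_if_missing helper. A minimal sketch, assuming it simply wraps os.makedirs and ignores an already-existing directory:

import errno
import os

def mkdir_if_missing(dir_path):
    # Create dir_path (and any missing parents); ignore if it already exists.
    try:
        os.makedirs(dir_path)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise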
Example #3
    def _download_data(self):
        if osp.exists(self.root):
            print("This dataset has been downloaded.")
            return

        mkdir_if_missing(self.root)
        fpath = osp.join(self.root, osp.basename(self.dataset_url))

        print("Downloading iLIDS-VID dataset")
        # urllib.URLopener is Python 2 only; urllib.request works on Python 3
        urllib.request.urlretrieve(self.dataset_url, fpath)

        print("Extracting files")
        tar = tarfile.open(fpath)
        tar.extractall(path=self.root)
        tar.close()
    def _download_data(self):
        if osp.exists(self.dataset_dir):
            print("This dataset has been downloaded.")
            return

        print("Creating directory {}".format(self.dataset_dir))
        mkdir_if_missing(self.dataset_dir)
        fpath = osp.join(self.dataset_dir, osp.basename(self.dataset_url))

        print("Downloading DukeMTMC-VideoReID dataset")
        # urllib.URLopener is Python 2 only; urllib.request works on Python 3
        urllib.request.urlretrieve(self.dataset_url, fpath)

        print("Extracting files")
        zip_ref = zipfile.ZipFile(fpath, 'r')
        zip_ref.extractall(self.dataset_dir)
        zip_ref.close()
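The two _download_data methods above differ only in the archive format. A hedged sketch of a shared helper (download_and_extract is a hypothetical name, reusing mkdir_if_missing from above):

import os.path as osp
import tarfile
import zipfile
from urllib import request

def download_and_extract(url, dest_dir):
    # Download the archive into dest_dir, then extract based on extension.
    mkdir_if_missing(dest_dir)
    fpath = osp.join(dest_dir, osp.basename(url))
    request.urlretrieve(url, fpath)
    if fpath.endswith(('.tar', '.tar.gz', '.tgz')):
        with tarfile.open(fpath) as tar:
            tar.extractall(path=dest_dir)
    elif fpath.endswith('.zip'):
        with zipfile.ZipFile(fpath, 'r') as zf:
            zf.extractall(dest_dir)
    else:
        raise ValueError('Unsupported archive: {}'.format(fpath))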
Example #5
def main(args):
    output_dir = args.output_dir
    mkdir_if_missing(osp.join(args.output_dir, 'cam_0'))
    mkdir_if_missing(osp.join(args.output_dir, 'cam_1'))
    # Collect the person_id and view_id into dict
    images = glob(osp.join(args.input_dir, 'RGB', '*.bmp'))
    pdict = defaultdict(lambda: defaultdict(list))
    for imname in images:
        pid, vid = osp.basename(imname).split('_')[0:2]
        pdict[pid][vid].append(imname)
    # Randomly choose half of the views as cam_0, others as cam_1
    identities = []
    for i, pid in enumerate(pdict):
        vids = list(pdict[pid].keys())
        num_views = len(vids)
        np.random.shuffle(vids)
        p_images = [[], []]
        for vid in vids[:(num_views // 2)]:
            for src_file in pdict[pid][vid]:
                tgt_file = 'cam_0/{:05d}_{:05d}.bmp'.format(
                    i, len(p_images[0]))
                shutil.copy(src_file, osp.join(args.output_dir, tgt_file))
                p_images[0].append(tgt_file)
        for vid in vids[(num_views // 2):]:
            for src_file in pdict[pid][vid]:
                tgt_file = 'cam_1/{:05d}_{:05d}.bmp'.format(
                    i, len(p_images[1]))
                shutil.copy(src_file, osp.join(args.output_dir, tgt_file))
                p_images[1].append(tgt_file)
        identities.append(p_images)
    # Save meta information into a json file
    meta = {'name': '3DPeS', 'shot': 'multiple', 'num_cameras': 2}
    meta['identities'] = identities
    write_json(meta, osp.join(args.output_dir, 'meta.json'))
    # Randomly create a training and test split
    num = len(identities)
    pids = np.random.permutation(num)
    trainval_pids = sorted(pids[100:])
    test_pids = sorted(pids[:100])
    split = {
        'trainval': trainval_pids,
        'test_probe': test_pids,
        'test_gallery': test_pids
    }
    write_json(split, osp.join(output_dir, 'split.json'))
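For orientation, the two JSON files written above have roughly this shape (illustrative values, not verbatim output):

# meta.json: identities[pid][cam] is a list of relative image paths
meta = {
    'name': '3DPeS',
    'shot': 'multiple',
    'num_cameras': 2,
    'identities': [
        [['cam_0/00000_00000.bmp'], ['cam_1/00000_00000.bmp']],
        # ... one entry per person
    ],
}
# split.json: lists of person ids
split = {
    'trainval': [100, 101],
    'test_probe': [0, 1],
    'test_gallery': [0, 1],
}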
Example #6
def main(args):
    meta = read_json(osp.join(args.dataset_dir, 'meta.json'))
    split = read_json(osp.join(args.dataset_dir, 'split.json'))
    identities = np.asarray(meta['identities'])
    # Make train / val. Consider single or multiple shot.
    trainval = identities[split['trainval']]
    if meta['shot'] == 'single':
        # When single shot, to ensure each identity has at least one training
        # image, we first randomly choose validation identities, then randomly
        # split their views equally for training and validation.
        num_val = int(len(trainval) * args.val_ratio) * 2
        np.random.shuffle(trainval)
        train = list(trainval[num_val:])
        val = []
        for views in trainval[:num_val]:
            np.random.shuffle(views)
            train.append(views[:len(views) // 2])
            val.append(views[len(views) // 2:])
        train = _get_list(train)
        val = _get_list(val)
    else:
        # When multiple shots, we just randomly split the trainval images
        trainval = _get_list(trainval)
        np.random.shuffle(trainval)
        num_val = int(len(trainval) * args.val_ratio)
        train = trainval[num_val:]
        val = trainval[:num_val]
    # Make test probe / gallery. Probe identities should be a subset of
    # gallery's. First half views are probe, others are gallery.
    assert len(set(split['test_probe']) - set(split['test_gallery'])) == 0
    test_probe, test_gallery = [], []
    for views in identities[split['test_probe']]:
        test_probe.append(views[:len(views) // 2])
        test_gallery.append(views[len(views) // 2:])
    only_in_gallery = list(
        set(split['test_gallery']) - set(split['test_probe']))
    test_gallery.extend(identities[only_in_gallery])
    test_probe = _get_list(test_probe)
    test_gallery = _get_list(test_gallery)
    # Save to files
    mkdir_if_missing(args.output_dir)
    _save(train, osp.join(args.output_dir, 'train.txt'))
    _save(val, osp.join(args.output_dir, 'val.txt'))
    _save(test_probe, osp.join(args.output_dir, 'test_probe.txt'))
    _save(test_gallery, osp.join(args.output_dir, 'test_gallery.txt'))
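_get_list and _save are not shown in this excerpt. A plausible minimal sketch, assuming each identity is a nested list of per-camera image file names:

def _get_list(identities):
    # Recursively flatten nested view lists into a flat list of file names.
    ret = []
    for item in identities:
        if isinstance(item, str):
            ret.append(item)
        else:
            ret.extend(_get_list(item))
    return ret

def _save(file_list, path):
    # Write one file name per line.
    with open(path, 'w') as f:
        f.write('\n'.join(file_list))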
Example #7
    def __init__(self, params):
        self.params = params
        self.starting_time = time.time()
        self.elapsed_time = 0
        utils.mkdir_if_missing(self.params.MODEL_SAVE_DIR)
        self.val_datagen = datagen.SequenceDataGenerator(
            num_shape=self.params.NUM_SHAPE,
            image_size=self.params.IMG_SIZE,
            sequence_len=self.params.SEQUENCE_LEN,
            random_size=True,
            rotate_shapes=self.params.ROTATE_SHAPES)
        self.train_data_loader = dataset.SequenceDataLoader(
            dataset_path=self.params.TRAIN_SET_PATH, shuffle=True)
        self.val_data_loader = dataset.SequenceDataLoader(
            dataset_path=self.params.VAL_SET_PATH, shuffle=False)
        self.test_data_loader = dataset.SequenceDataLoader(
            dataset_path=self.params.TEST_SET_PATH, shuffle=False)
Example #8
def train_teacher():
    """
    Train each teacher in an ensemble of config.nb_teachers models on its own
    partition of the dataset specified by config.dataset (e.g. svhn, cifar10,
    celeba).
    :return: True if everything went well
    """
    # If working directories do not exist, create them
    # assert utils.mkdir_if_missing(config.data_dir)
    # assert utils.mkdir_if_missing(config.train_dir)
    print("Initializing dataset {}".format(config.dataset))

    dataset = data_manager.init_img_dataset(
        root=config.data_dir, name=config.dataset,
    )

    for i in range(config.nb_teachers):
        # Retrieve the subset of data for this teacher
        if config.dataset == 'celeba':
            data, labels = dataset._data_partition(config.nb_teachers, i)

        print("Length of training data: " + str(len(data)))

        # Define the teacher checkpoint filename and full path
        dir_path = os.path.join(
            config.save_model,
            'pate_' + config.dataset + str(config.nb_teachers))
        utils.mkdir_if_missing(dir_path)
        filename = os.path.join(
            dir_path,
            str(config.nb_teachers) + '_teachers_' + str(i) +
            config.arch + '.checkpoint.pth.tar')
        print('save_path for teacher {} is {}'.format(i, filename))

        network.train_each_teacher(config.teacher_epoch, data, labels,
                                   dataset.test_data, dataset.test_label,
                                   filename)

    return True
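dataset._data_partition is assumed to slice the private training set evenly across teachers, mirroring the explicit slicing in the MNIST/SVHN variant below; a hypothetical sketch (train_data / train_labels are assumed attribute names):

def _data_partition(self, nb_teachers, teacher_id):
    # Give each teacher a contiguous, equally sized shard of the training set.
    batch_len = len(self.train_data) // nb_teachers
    start = teacher_id * batch_len
    end = start + batch_len
    return self.train_data[start:end], self.train_labels[start:end]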
Example #9
File: raw.py Project: msoliman6/MVA
def main(args):
    mkdir_if_missing(args.output_dir)
    # training data
    data = []
    labels = []
    for i in range(1, 6):
        dic = unpickle(osp.join(args.data_root, 'data_batch_{}'.format(i)))
        data.append(dic['data'])
        labels = np.r_[labels, dic['labels']]
    data = np.vstack(data)
    make_data(data, labels, args.output_dir, 'train')
    # test data
    dic = unpickle(osp.join(args.data_root, 'test_batch'))
    make_data(dic['data'], dic['labels'], args.output_dir, 'test')
    
    # Identity for confusion initialization
    matrix_I = np.identity(10)
    write_matrix(matrix_I, osp.join(args.output_dir, 'identity.txt'))
    pickle(matrix_I, osp.join(args.output_dir, 'identity.pkl'))
Example #10
def train_teacher():
    """
    Partition the entire private (training) set into config.nb_teachers subsets
    and train one teacher model per subset.
    """
    # Load the dataset
    if config.dataset == 'mnist':
        train_dataset = dataset.MNIST(root=config.data_dir, train=True, download=True)
        test_dataset = dataset.MNIST(root=config.data_dir, train=False, download=True)
        ori_train_data= [ data[0] for idx, data in enumerate(train_dataset)]
        ori_test_data = [ data[0] for idx, data in enumerate(test_dataset)]
        test_labels = test_dataset.targets
        train_labels = train_dataset.targets
    elif config.dataset =='svhn':
        train_dataset = dataset.SVHN(root=config.data_dir, split='train', download=True)
        extra_dataset = dataset.SVHN(root=config.data_dir, split='extra', download=True)
        test_dataset = dataset.SVHN(root=config.data_dir, split='test', download=True)
        ori_train_data = np.concatenate(
            (train_dataset.data, extra_dataset.data), axis=0)
        print('ori data shape', ori_train_data.shape)
        # SVHN arrays are (N, C, H, W); convert to (N, H, W, C) images
        ori_train_data = np.transpose(ori_train_data, (0, 2, 3, 1))
        print('orig data shape', ori_train_data.shape)
        ori_test_data = np.transpose(test_dataset.data, (0, 2, 3, 1))
        test_labels = test_dataset.labels
        train_labels = list(train_dataset.labels) + list(extra_dataset.labels)
    batch_len = len(ori_train_data) // config.nb_teachers
    for i in range(0, 1):  # NOTE: only the first of nb_teachers teachers is trained here
        dir_path = os.path.join(config.save_model,'pate_'+str(config.nb_teachers))
        utils.mkdir_if_missing(dir_path)
        filename = os.path.join(
            dir_path,
            str(config.nb_teachers) + '_teachers_' + str(i) +
            config.arch + '.checkpoint.pth.tar')
        print('save_path for teacher {} is {}'.format(i, filename))
        start = i * batch_len
        end = (i + 1) * batch_len
        t_data = ori_train_data[start:end]
        t_labels = train_labels[start:end]
        network.train_each_teacher(config.teacher_epoch, t_data, t_labels,
                                   ori_test_data, test_labels, filename)
Example #11
def main(args):
    id_offset = 0
    merged_train_kv = {}
    merged_val_kv = {}
    for dataset_dir, db_dir in zip(args.dataset_dirs, args.db_dirs):
        train_files, train_labels = read_kv(osp.join(db_dir, 'train.txt'))
        val_files, val_labels = read_kv(osp.join(db_dir, 'val.txt'))
        unique_ids = sorted(set(map(int, train_labels + val_labels)))
        # Sort so the id mapping is deterministic across runs
        id_mapping = {idx: i + id_offset for i, idx in enumerate(unique_ids)}
        for k, v in zip(train_files, train_labels):
            merged_train_kv[osp.join(dataset_dir, k)] = id_mapping[int(v)]
        for k, v in zip(val_files, val_labels):
            merged_val_kv[osp.join(dataset_dir, k)] = id_mapping[int(v)]
        id_offset += len(id_mapping)
    mkdir_if_missing(args.output_dir)
    train_list = [k + ' ' + str(v) for k, v in merged_train_kv.items()]
    np.random.shuffle(train_list)
    write_list(train_list, osp.join(args.output_dir, 'train.txt'))
    write_kv(list(merged_val_kv.keys()),
             list(map(str, merged_val_kv.values())),
             osp.join(args.output_dir, 'val.txt'))
    print("Max ID:", id_offset)
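read_kv, write_kv, and write_list are assumed to be simple line-oriented text helpers; a minimal sketch under that assumption:

def read_kv(path):
    # Read "key value" pairs, one per line, into two parallel lists.
    keys, values = [], []
    with open(path) as f:
        for line in f:
            if line.strip():
                k, v = line.split()
                keys.append(k)
                values.append(v)
    return keys, values

def write_kv(keys, values, path):
    # Write "key value" pairs, one per line.
    with open(path, 'w') as f:
        for k, v in zip(keys, values):
            f.write('{} {}\n'.format(k, v))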
Example #12
def main(args):
    output_dir = args.output_dir
    mkdir_if_missing(osp.join(args.output_dir, 'cam_0'))
    mkdir_if_missing(osp.join(args.output_dir, 'cam_1'))
    # Collect the images of each person into dict
    images = glob(osp.join(args.ilids_dir, 'Persons', '*.jpg'))
    pdict = defaultdict(list)
    for imname in images:
        pid = int(osp.basename(imname)[:4])
        pdict[pid].append(imname)
    # Randomly choose half of the images as cam_0, others as cam_1
    identities = []
    for i, (pid, images) in enumerate(pdict.items()):
        num = len(images)
        np.random.shuffle(images)
        p_images = [[], []]
        for src_file in images[:(num // 2)]:
            tgt_file = 'cam_0/{:05d}_{:05d}.jpg'.format(i, len(p_images[0]))
            shutil.copy(src_file, osp.join(args.output_dir, tgt_file))
            p_images[0].append(tgt_file)
        for src_file in images[(num // 2):]:
            tgt_file = 'cam_1/{:05d}_{:05d}.jpg'.format(i, len(p_images[1]))
            shutil.copy(src_file, osp.join(args.output_dir, tgt_file))
            p_images[1].append(tgt_file)
        identities.append(p_images)
    # Save meta information into a json file
    meta = {'name': 'i-LIDS', 'shot': 'multiple', 'num_cameras': 2}
    meta['identities'] = identities
    write_json(meta, osp.join(output_dir, 'meta.json'))
    # Randomly create a training and test split
    num = len(identities)
    pids = np.random.permutation(num)
    trainval_pids = sorted(pids[:num // 2])
    test_pids = sorted(pids[num // 2:])
    split = {
        'trainval': trainval_pids,
        'test_probe': test_pids,
        'test_gallery': test_pids
    }
    write_json(split, osp.join(output_dir, 'split.json'))
Example #13
def ensemble_preds(nb_teachers, stdnt_data):
    """
    Given a dataset, a number of teachers, and some input data, this helper
    function queries each teacher for predictions on the data and returns
    all predictions in a single array. (These can then be aggregated into
    one prediction per input using aggregation.py; cf. prepare_student_data()
    below.)
    :param dataset: string corresponding to mnist, cifar10, or svhn
    :param nb_teachers: number of teachers (in the ensemble) to learn from
    :param stdnt_data: unlabeled student training data
    :return: 3d array (teacher id, sample id, probability per class)
    """

    result_shape = (nb_teachers, len(stdnt_data), config.nb_labels)

    # Create array that will hold result
    result = np.zeros(result_shape, dtype=np.float32)
    # Get predictions from each teacher
    for teacher_id in range(nb_teachers):
        # Compute path of checkpoint file for teacher model with ID teacher_id
        if config.dataset == 'celeba':
            dir_path = os.path.join(
                config.save_model,
                'pate_num_teacher_' + str(config.nb_teachers))
        elif config.dataset == 'market':
            dir_path = os.path.join(
                config.save_model,
                'pate_' + config.dataset + str(config.nb_teachers))
        utils.mkdir_if_missing(dir_path)
        filename = os.path.join(
            dir_path,
            str(config.nb_teachers) + '_teachers_' + str(teacher_id) +
            config.arch + '.checkpoint.pth.tar')
        result[teacher_id] = network.pred(stdnt_data, filename)

        # This can take a while when there are a lot of teachers so output status
        print("Computed Teacher " + str(teacher_id) + " softmax predictions")

    return result
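The returned (teacher, sample, class) array is typically reduced to one label per sample by noisy plurality voting, the standard PATE aggregation that aggregation.py refers to. A hedged sketch (lap_scale is a hypothetical noise parameter):

import numpy as np

def noisy_aggregate(teacher_preds, lap_scale):
    # teacher_preds: (nb_teachers, n_samples, nb_labels) softmax outputs.
    votes = teacher_preds.argmax(axis=2)  # each teacher's predicted label
    nb_labels = teacher_preds.shape[2]
    labels = np.zeros(votes.shape[1], dtype=np.int64)
    for i in range(votes.shape[1]):
        counts = np.bincount(votes[:, i], minlength=nb_labels).astype(float)
        counts += np.random.laplace(0.0, lap_scale, size=nb_labels)  # DP noise
        labels[i] = counts.argmax()
    return labels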
Example #14
    def gen_dataset(self, params, dataset_type, seed=0):
        random.seed(seed)
        np.random.seed(seed)
        if dataset_type == 'train':
            num_seq = params.TRAIN_NUM_SEQ
            path = params.TRAIN_SET_PATH
        elif dataset_type == 'val':
            num_seq = params.VAL_NUM_SEQ
            path = params.VAL_SET_PATH
        elif dataset_type == 'test':
            num_seq = params.TEST_NUM_SEQ
            path = params.TEST_SET_PATH
        else:
            raise ValueError('dataset_type must be train, val, or test')

        for i in range(num_seq):
            sdg = SequenceDataGenerator(params.NUM_SHAPE, params.IMG_SIZE,
                                        params.SEQUENCE_LEN,
                                        params.RANDOM_SIZE,
                                        params.ROTATE_SHAPES)
            utils.update_progress(i / num_seq)
            seq = sdg.get_sequence()
            pickle_folder_path = os.path.join(path, f'seq_{i}')
            utils.mkdir_if_missing(pickle_folder_path)
            pickle_full_path = os.path.join(pickle_folder_path,
                                            'sequence.pickle')
            with open(pickle_full_path, 'wb') as handle:
                pickle.dump(seq, handle)
            image_count = 0
            for info in seq:
                image = info['image']
                image_folder_path = os.path.join(path, f'seq_{i}', 'images')
                utils.mkdir_if_missing(image_folder_path)
                image_full_path = os.path.join(image_folder_path,
                                               f'{image_count:05d}.png')
                image = (image * 255).astype(np.uint8)
                cv2.imwrite(image_full_path, image)
                image_count += 1
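Reading a generated sequence back follows directly from the layout written above; a minimal sketch:

import os
import pickle

def load_sequence(dataset_path, seq_id):
    # Load the pickled list of frame dicts written by gen_dataset.
    pickle_full_path = os.path.join(dataset_path, f'seq_{seq_id}',
                                    'sequence.pickle')
    with open(pickle_full_path, 'rb') as handle:
        return pickle.load(handle)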
Example #15
def main(args):
    # cam_0 to cam_15
    for i in range(16):
        mkdir_if_missing(osp.join(args.output_dir, 'cam_' + str(i)))
    images = glob(osp.join(args.shinpuhkan_dir, 'images', '*.jpg'))
    images.sort()
    identities = []
    prev_pid = -1
    for name in images:
        name = osp.basename(name)
        p_id = int(name[0:3]) - 1
        c_id = int(name[4:6]) - 1
        if prev_pid != p_id:
            identities.append([])
            prev_cid = -1
        p_images = identities[-1]
        if prev_cid != c_id:
            p_images.append([])
        v_images = p_images[-1]
        file_name = 'cam_{}/{:05d}_{:05d}.jpg'.format(c_id, p_id,
                                                      len(v_images))
        shutil.copy(osp.join(args.shinpuhkan_dir, 'images', name),
                    osp.join(args.output_dir, file_name))
        v_images.append(file_name)
        prev_pid = p_id
        prev_cid = c_id
    # Save meta information into a json file
    meta = {'name': 'Shinpuhkan', 'shot': 'multiple', 'num_cameras': 16}
    meta['identities'] = identities
    write_json(meta, osp.join(args.output_dir, 'meta.json'))
    # We don't test on this dataset. Just use all the data for train / val.
    split = {
        'trainval': list(range(len(identities))),
        'test_probe': [],
        'test_gallery': []
    }
    write_json(split, osp.join(args.output_dir, 'split.json'))
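The slicing above assumes Shinpuhkan file names of the form PPP_CC_... with 1-based person and camera ids; a quick illustration (the file name is hypothetical):

name = '008_03_00000120.jpg'
p_id = int(name[0:3]) - 1  # -> 7 (zero-based person id)
c_id = int(name[4:6]) - 1  # -> 2 (zero-based camera id)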
Example #16
def main(args):
    output_dir = args.output_dir
    mkdir_if_missing(osp.join(args.output_dir, 'cam_0'))
    mkdir_if_missing(osp.join(args.output_dir, 'cam_1'))
    identities = []
    cam1_images = glob(osp.join(args.viper_dir, 'cam_a', '*.bmp'))
    cam2_images = glob(osp.join(args.viper_dir, 'cam_b', '*.bmp'))
    cam1_images.sort()
    cam2_images.sort()
    assert len(cam1_images) == len(cam2_images)
    for i in range(len(cam1_images)):
        p_id = len(identities)
        p_images = []
        # view-0
        file_name = 'cam_0/{:05d}_{:05d}.bmp'.format(p_id, 0)
        shutil.copy(cam1_images[i], osp.join(args.output_dir, file_name))
        p_images.append([file_name])
        # view-1
        file_name = 'cam_1/{:05d}_{:05d}.bmp'.format(p_id, 0)
        shutil.copy(cam2_images[i], osp.join(args.output_dir, file_name))
        p_images.append([file_name])
        identities.append(p_images)
    # Save meta information into a json file
    meta = {'name': 'VIPeR', 'shot': 'single', 'num_cameras': 2}
    meta['identities'] = identities
    write_json(meta, osp.join(args.output_dir, 'meta.json'))
    # Randomly create a training and test split
    num = len(identities)
    pids = np.random.permutation(num)
    trainval_pids = sorted(pids[:num // 2])
    test_pids = sorted(pids[num // 2:])
    split = {'trainval': trainval_pids,
             'test_probe': test_pids,
             'test_gallery': test_pids}
    write_json(split, osp.join(output_dir, 'split.json'))
Example #17
def train_student(nb_teachers):
    """
    This function trains a student using predictions made by an ensemble of
    teachers. The student and teacher models are trained using the same
    neural network architecture.
    :param dataset: string corresponding to celeba
    :param nb_teachers: number of teachers (in the ensemble) to learn from
    :return: True if student training went well
    """
    # Call helper function to prepare student data using teacher predictions
    stdnt_dataset = prepare_student_data(nb_teachers, save=True)

    # Unpack the student dataset
    stdnt_data, stdnt_labels, stdnt_test_data, stdnt_test_labels = stdnt_dataset
    dir_path = os.path.join(config.save_model, config.dataset)
    dir_path = os.path.join(dir_path, 'knn_num_neighbor_' + str(config.nb_teachers))
    utils.mkdir_if_missing(dir_path)
    if config.resnet:
        filename = os.path.join(
            dir_path,
            str(config.nb_teachers) + '_stdnt_resnet.checkpoint.pth.tar')
    # NOTE: filename is only defined when config.resnet is set

    print('stdnt_labels used for training:', stdnt_labels.shape)
    network.train_each_teacher(config.student_epoch, stdnt_data, stdnt_labels, stdnt_test_data, stdnt_test_labels,
                               filename)
    return True
Example #18
def main(args):
    output_dir = args.output_dir
    mkdir_if_missing(osp.join(args.output_dir, 'cam_0'))
    mkdir_if_missing(osp.join(args.output_dir, 'cam_1'))
    # Randomly choose 100 people from the 200 shared people as test probe
    p = list(np.random.permutation(200))
    test_probe = list(range(100))
    test_gallery = list(range(100))
    identities = []
    for pid in p[:100]:
        p_images = []
        src_file = osp.join(args.prid_dir, 'single_shot', 'cam_a',
                            'person_{:04d}.png'.format(pid + 1))
        tgt_file = osp.join('cam_0',
                            '{:05d}_00000.png'.format(len(identities)))
        shutil.copy(src_file, osp.join(args.output_dir, tgt_file))
        p_images.append([tgt_file])
        src_file = osp.join(args.prid_dir, 'single_shot', 'cam_b',
                            'person_{:04d}.png'.format(pid + 1))
        tgt_file = osp.join('cam_1',
                            '{:05d}_00000.png'.format(len(identities)))
        shutil.copy(src_file, osp.join(args.output_dir, tgt_file))
        p_images.append([tgt_file])
        identities.append(p_images)
    # Other 100 people from the 200 as a part of trainval
    # Choose 10 images randomly from the multi-shot images
    trainval = list(range(100, 200))
    for pid in p[100:]:
        p_images = [[], []]
        images = glob(
            osp.join(args.prid_dir, 'multi_shot', 'cam_a',
                     'person_{:04d}'.format(pid + 1), '*.png'))
        images = np.random.choice(images,
                                  size=min(10, len(images)),
                                  replace=False)
        for src_file in images:
            tgt_file = osp.join(
                'cam_0', '{:05d}_{:05d}.png'.format(len(identities),
                                                    len(p_images[0])))
            shutil.copy(src_file, osp.join(args.output_dir, tgt_file))
            p_images[0].append(tgt_file)
        images = glob(
            osp.join(args.prid_dir, 'multi_shot', 'cam_b',
                     'person_{:04d}'.format(pid + 1), '*.png'))
        images = np.random.choice(images,
                                  size=min(10, len(images)),
                                  replace=False)
        for src_file in images:
            tgt_file = osp.join(
                'cam_1', '{:05d}_{:05d}.png'.format(len(identities),
                                                    len(p_images[1])))
            shutil.copy(src_file, osp.join(args.output_dir, tgt_file))
            p_images[1].append(tgt_file)
        identities.append(p_images)
    # 201 to 385 cam_a people as another part of trainval
    for pid in range(200, 385):
        p_images = [[], []]
        images = glob(
            osp.join(args.prid_dir, 'multi_shot', 'cam_a',
                     'person_{:04d}'.format(pid + 1), '*.png'))
        images = np.random.choice(images,
                                  size=min(10, len(images)),
                                  replace=False)
        for src_file in images:
            tgt_file = osp.join(
                'cam_0', '{:05d}_{:05d}.png'.format(len(identities),
                                                    len(p_images[0])))
            shutil.copy(src_file, osp.join(args.output_dir, tgt_file))
            p_images[0].append(tgt_file)
        trainval.append(len(identities))
        identities.append(p_images)
    # 201 to 749 cam_b people as additional test gallery
    for pid in range(200, 749):
        src_file = osp.join(args.prid_dir, 'single_shot', 'cam_b',
                            'person_{:04d}.png'.format(pid + 1))
        tgt_file = osp.join('cam_1',
                            '{:05d}_00000.png'.format(len(identities)))
        shutil.copy(src_file, osp.join(args.output_dir, tgt_file))
        p_images = [[], [tgt_file]]
        test_gallery.append(len(identities))
        identities.append(p_images)
    # Save meta information into a json file
    meta = {'name': 'PRID', 'shot': 'multiple', 'num_cameras': 2}
    meta['identities'] = identities
    write_json(meta, osp.join(args.output_dir, 'meta.json'))
    # We have only one split
    split = {
        'trainval': trainval,
        'test_probe': test_probe,
        'test_gallery': test_gallery
    }
    write_json(split, osp.join(output_dir, 'split.json'))
Example #19
def train_task(args, train_loader, current_task, prototype={}, pre_index=0):
    num_class_per_task = (args.num_class - args.nb_cl_fg) // args.num_task
    task_range = list(
        range(args.nb_cl_fg + (current_task - 1) * num_class_per_task,
              args.nb_cl_fg + current_task * num_class_per_task))
    if num_class_per_task != 0:
        old_task_factor = args.nb_cl_fg // num_class_per_task + current_task - 1
        print(old_task_factor)
    log_dir = os.path.join(args.ckpt_dir, args.log_dir)
    mkdir_if_missing(log_dir)

    sys.stdout = logging.Logger(
        os.path.join(log_dir, 'log_task{}.txt'.format(current_task)))
    tb_writer = SummaryWriter(log_dir)
    display(args)

    if 'miniimagenet' in args.data:
        model = models.create('resnet18_imagenet',
                              pretrained=False,
                              feat_dim=args.feat_dim,
                              embed_dim=args.num_class,
                              hidden_dim=256,
                              norm=True)
    elif 'cifar100' in args.data:
        model = models.create('resnet18_cifar',
                              pretrained=False,
                              feat_dim=args.feat_dim,
                              hidden_dim=256,
                              embed_dim=args.num_class,
                              norm=True)

    if current_task > 0:
        # Load the previous task's checkpoint (it replaces the freshly
        # constructed model above)
        model = torch.load(
            os.path.join(
                log_dir, 'task_' + str(current_task - 1).zfill(2) +
                '_%d_model.pkl' % int(args.epochs - 1)))
        model_old = deepcopy(model)
        model_old.eval()
        model_old = freeze_model(model_old)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # model = model.cuda()
    model = model.to(device)

    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)
    scheduler = StepLR(optimizer,
                       step_size=args.lr_decay_step,
                       gamma=args.lr_decay)

    loss_mse = torch.nn.MSELoss(reduction='sum')

    # # Loss weight for gradient penalty used in W-GAN
    lambda_gp = args.lambda_gp
    lambda_lwf = args.gan_tradeoff
    # Initialize generator and discriminator
    if current_task == 0:
        generator = Generator(feat_dim=args.feat_dim,
                              latent_dim=args.latent_dim,
                              hidden_dim=args.hidden_dim,
                              class_dim=args.num_class,
                              norm=True)
        discriminator = Discriminator(feat_dim=args.feat_dim,
                                      hidden_dim=args.hidden_dim,
                                      class_dim=args.num_class)
    else:
        generator = torch.load(
            os.path.join(
                log_dir, 'task_' + str(current_task - 1).zfill(2) +
                '_%d_model_generator.pkl' % int(args.epochs_gan - 1)))
        discriminator = torch.load(
            os.path.join(
                log_dir, 'task_' + str(current_task - 1).zfill(2) +
                '_%d_model_discriminator.pkl' % int(args.epochs_gan - 1)))
        generator_old = deepcopy(generator)
        generator_old.eval()
        generator_old = freeze_model(generator_old)

    cuda = torch.cuda.is_available()
    FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
    g_len = sum(1 for _ in generator.parameters())
    d_len = sum(1 for _ in discriminator.parameters())
    learned_lrs = []
    params = []
    for i in range(args.update_step):
        # Build per-parameter learnable lrs with a comprehension;
        # [Variable(...)] * g_len would alias one shared tensor.
        g_lrs = [Variable(FloatTensor(1).fill_(args.update_lr),
                          requires_grad=True) for _ in range(g_len)]
        d_lrs = [Variable(FloatTensor(1).fill_(args.update_lr),
                          requires_grad=True) for _ in range(d_len)]
        learned_lrs.append((g_lrs, d_lrs))
        for param_list in learned_lrs[i]:
            params += param_list

    generator = generator.to(device)
    discriminator = discriminator.to(device)

    optimizer_G = torch.optim.Adam(generator.parameters(),
                                   lr=args.gan_lr,
                                   betas=(0.5, 0.999))
    optimizer_D = torch.optim.Adam(discriminator.parameters(),
                                   lr=args.gan_lr,
                                   betas=(0.5, 0.999))
    optimizer_lr = torch.optim.Adam(params, lr=args.lr)

    scheduler_G = StepLR(optimizer_G, step_size=150, gamma=0.3)
    scheduler_D = StepLR(optimizer_D, step_size=150, gamma=0.3)

    for p in generator.parameters():  # set requires_grad to False
        p.requires_grad = False

    for epoch in range(args.epochs):

        loss_log = {
            'C/loss': 0.0,
            'C/loss_aug': 0.0,
            'C/loss_cls': 0.0,
            'C/loss_cls_q': 0.0
        }

        ##### MAML on feature extraction

        for step, (x_spt, y_spt, x_qry, y_qry) in enumerate(train_loader):
            x_spt, y_spt, x_qry, y_qry = x_spt.to(device), y_spt.to(
                device), x_qry.to(device), y_qry.to(device)

            loss = torch.zeros(1).to(device)
            loss_cls = torch.zeros(1).to(device)
            loss_aug = torch.zeros(1).to(device)
            loss_tmp = torch.zeros(1).to(device)

            BatchSize, setsz, c_, h, w = x_spt.size()
            querysz = x_qry.size(1)

            # losses_q[i] is the loss on update step i
            losses_q = [0 for _ in range(args.update_step + 1)]
            corrects = [0.0 for _ in range(args.update_step + 1)]
            correct_s = [0.0 for _ in range(args.update_step + 1)]

            y_onehot = FloatTensor(setsz, args.num_class)
            y_onehot_q = FloatTensor(querysz, args.num_class)

            for i in range(args.BatchSize):
                # 1. run the i-th task and compute loss for k=0
                embed_feat = model(x_spt[i])
                if current_task == 0:
                    soft_feat = model.embed(embed_feat)
                    loss_cls = torch.nn.CrossEntropyLoss()(soft_feat, y_spt[i])
                    loss = loss.clone() + loss_cls
                else:
                    embed_feat_old = model_old(x_spt[i])

                ### Feature Extractor Loss
                if current_task > 0:
                    loss_aug = torch.dist(embed_feat, embed_feat_old, 2)
                    loss = loss.clone() + args.tradeoff * loss_aug * old_task_factor

                ### Replay and Classification Loss
                if current_task > 0:
                    embed_sythesis = []
                    embed_label_sythesis = []
                    ind = list(range(len(pre_index)))

                    if args.mean_replay:
                        for _ in range(setsz):
                            np.random.shuffle(ind)
                            tmp = prototype['class_mean'][
                                ind[0]] + np.random.normal(
                                ) * prototype['class_std'][ind[0]]
                            embed_sythesis.append(tmp)
                            embed_label_sythesis.append(
                                prototype['class_label'][ind[0]])
                        embed_sythesis = np.asarray(embed_sythesis)
                        embed_label_sythesis = np.asarray(embed_label_sythesis)
                        embed_sythesis = torch.from_numpy(embed_sythesis).to(
                            device)
                        embed_label_sythesis = torch.from_numpy(
                            embed_label_sythesis)
                    else:
                        for _ in range(setsz):
                            np.random.shuffle(ind)
                            embed_label_sythesis.append(pre_index[ind[0]])
                        embed_label_sythesis = np.asarray(embed_label_sythesis)
                        embed_label_sythesis = torch.from_numpy(
                            embed_label_sythesis).to(device)
                        y_onehot.zero_()
                        y_onehot.scatter_(1, embed_label_sythesis[:, None], 1)
                        syn_label_pre = y_onehot.to(device)

                        z = torch.Tensor(
                            np.random.normal(
                                0, 1, (setsz, args.latent_dim))).to(device)

                        embed_sythesis = generator(z, syn_label_pre)

                    embed_sythesis = torch.cat((embed_feat, embed_sythesis))
                    embed_label_sythesis = torch.cat(
                        (y_spt[i], embed_label_sythesis.to(device)))
                    soft_feat_syt = model.embed(embed_sythesis)

                    batch_size1 = x_spt[i].shape[0]
                    batch_size2 = embed_feat.shape[0]

                    loss_cls = torch.nn.CrossEntropyLoss()(
                        soft_feat_syt[:batch_size1],
                        embed_label_sythesis[:batch_size1])
                    loss_cls_old = torch.nn.CrossEntropyLoss()(
                        soft_feat_syt[batch_size2:],
                        embed_label_sythesis[batch_size2:])

                    loss_cls += loss_cls_old * old_task_factor
                    loss_cls /= args.nb_cl_fg // num_class_per_task + current_task
                    loss += loss_cls

                grad = torch.autograd.grad(loss,
                                           model.parameters(),
                                           create_graph=True,
                                           retain_graph=True)

                # this is the loss and accuracy before first update
                with torch.no_grad():
                    # [setsz, nway]
                    embed_feat_q = model(x_qry[i])
                    soft_feat_q = model.embed(embed_feat_q)

                    loss_q = torch.nn.CrossEntropyLoss()(soft_feat_q, y_qry[i])
                    losses_q[0] += loss_q

                    embed_feat = model(x_spt[i])
                    soft_feat = model.embed(embed_feat)

                    pred_s = F.softmax(soft_feat, dim=1).argmax(dim=1)
                    corr = torch.eq(pred_s, y_spt[i]).sum().item()
                    correct_s[0] = correct_s[0] + corr

                    pred_q = F.softmax(soft_feat_q, dim=1).argmax(dim=1)
                    correct = torch.eq(pred_q, y_qry[i]).sum().item()
                    corrects[0] = corrects[0] + correct

                # this is the loss and accuracy after the first update
                with torch.no_grad():
                    # [setsz, nway]
                    for e, param in enumerate(model.parameters(), 0):
                        param.data -= args.update_lr * grad[e]

                    embed_feat_q = model(x_qry[i])
                    soft_feat_q = model.embed(embed_feat_q)

                    loss_q = torch.nn.CrossEntropyLoss()(soft_feat_q, y_qry[i])
                    losses_q[1] += loss_q
                    # [setsz]
                    embed_feat = model(x_spt[i])
                    soft_feat = model.embed(embed_feat)

                    pred_s = F.softmax(soft_feat, dim=1).argmax(dim=1)
                    corr = torch.eq(pred_s, y_spt[i]).sum().item()
                    correct_s[1] = correct_s[1] + corr

                    pred_q = F.softmax(soft_feat_q, dim=1).argmax(dim=1)
                    correct = torch.eq(pred_q, y_qry[i]).sum().item()
                    corrects[1] = corrects[1] + correct

                for k in range(1, args.update_step):
                    # 1. run the i-th task and compute loss for k=1~K-1
                    embed_feat = model(x_spt[i])

                    loss = torch.zeros(1).to(device)
                    if current_task > 0:
                        embed_feat_old = model_old(x_spt[i])
                        loss_aug = torch.dist(embed_feat, embed_feat_old, 2)
                        loss += args.tradeoff * loss_aug * old_task_factor
                        embed_sythesis = []
                        embed_label_sythesis = []
                        ind = list(range(len(pre_index)))
                        if args.mean_replay:
                            for _ in range(setsz):
                                np.random.shuffle(ind)
                                tmp = prototype['class_mean'][
                                    ind[0]] + np.random.normal(
                                    ) * prototype['class_std'][ind[0]]
                                embed_sythesis.append(tmp)
                                embed_label_sythesis.append(
                                    prototype['class_label'][ind[0]])
                            embed_sythesis = np.asarray(embed_sythesis)
                            embed_label_sythesis = np.asarray(
                                embed_label_sythesis)
                            embed_sythesis = torch.from_numpy(
                                embed_sythesis).to(device)
                            embed_label_sythesis = torch.from_numpy(
                                embed_label_sythesis)
                        else:
                            for _ in range(setsz):
                                np.random.shuffle(ind)
                                embed_label_sythesis.append(pre_index[ind[0]])
                            embed_label_sythesis = np.asarray(
                                embed_label_sythesis)
                            embed_label_sythesis = torch.from_numpy(
                                embed_label_sythesis).to(device)
                            y_onehot.zero_()
                            y_onehot.scatter_(1, embed_label_sythesis[:, None], 1)
                            syn_label_pre = y_onehot.to(device)

                            z = torch.Tensor(
                                np.random.normal(
                                    0, 1, (setsz, args.latent_dim))).to(device)
                            embed_sythesis = generator(z, syn_label_pre)

                        embed_sythesis = torch.cat(
                            (embed_feat, embed_sythesis))
                        embed_label_sythesis = torch.cat(
                            (y_spt[i], embed_label_sythesis.to(device)))

                        soft_feat_syt = model.embed(embed_sythesis)

                        batch_size1 = x_spt[i].shape[0]
                        batch_size2 = embed_feat.shape[0]

                        loss_cls = torch.nn.CrossEntropyLoss()(
                            soft_feat_syt[:batch_size1],
                            embed_label_sythesis[:batch_size1])
                        loss_cls_old = torch.nn.CrossEntropyLoss()(
                            soft_feat_syt[batch_size2:],
                            embed_label_sythesis[batch_size2:])
                        loss_cls += loss_cls_old * old_task_factor
                        loss_cls /= args.nb_cl_fg // num_class_per_task + current_task
                        loss += loss_cls
                    else:
                        soft_feat = model.embed(embed_feat)
                        loss_cls = torch.nn.CrossEntropyLoss()(soft_feat,
                                                               y_spt[i])
                        loss += loss_cls
                    # 2. compute grad on theta_pi
                    grad = torch.autograd.grad(loss,
                                               model.parameters(),
                                               create_graph=True,
                                               retain_graph=True,
                                               allow_unused=True)
                    # 3. theta_pi = theta_pi - train_lr * grad
                    for e, param in enumerate(model.parameters(), 0):
                        param.data -= args.update_lr * grad[e]
                    embed_feat = model(x_spt[i])
                    soft_feat = model.embed(embed_feat)
                    # soft_feat = mlp(embed_feat)
                    embed_feat_q = model(x_qry[i])
                    soft_feat_q = model.embed(embed_feat_q)

                    # loss_q will be overwritten and just keep the loss_q on last update step.
                    loss_q = torch.nn.CrossEntropyLoss()(soft_feat_q, y_qry[i])
                    losses_q[k + 1] += loss_q

                    with torch.no_grad():
                        pred_s = F.softmax(soft_feat, dim=1).argmax(dim=1)
                        corr = torch.eq(pred_s, y_spt[i]).sum().item()
                        correct_s[k + 1] = correct_s[k + 1] + corr

                        pred_q = F.softmax(soft_feat_q, dim=1).argmax(dim=1)
                        correct = torch.eq(pred_q, y_qry[i]).sum().item()
                        corrects[k + 1] = corrects[k + 1] + correct

            # end of all tasks
            # sum over all losses on query set across all tasks
            loss_q = losses_q[-1] / BatchSize
            # NOTE: wrapping in Variable creates a new leaf tensor, detaching
            # loss_q from the graph built through the model
            loss_q = Variable(loss_q, requires_grad=True)

            # optimize theta parameters
            optimizer.zero_grad()
            loss_q.backward()
            optimizer.step()
            scheduler.step()

            accs = np.array([float(c)
                             for c in corrects]) / float(querysz * BatchSize)
            accs_spt = np.array([float(c) for c in correct_s]) / float(
                setsz * BatchSize)
            loss_log['C/loss'] += loss.item()
            loss_log['C/loss_cls'] += loss_cls.item()
            loss_log['C/loss_aug'] += args.tradeoff * loss_aug.item(
            ) if args.tradeoff != 0 else 0
            loss_log['C/loss_cls_q'] += loss_q.item()

            del loss_cls
            del loss_q

        print(
            '[Metric Epoch %05d]\t Total Loss: %.3f \t LwF Loss: %.3f \t Spt Accuracy FeatureX: %.3f \t Query Loss: %.3f \t Query Accuracy FeatureX: %.3f \t'
            % (epoch + 1, loss_log['C/loss'], loss_log['C/loss_aug'],
               accs_spt[-1], loss_log['C/loss_cls_q'], accs[-1]))
        for k, v in loss_log.items():
            if v != 0:
                tb_writer.add_scalar(
                    'Task {} - Classifier/{}'.format(current_task, k), v,
                    epoch + 1)

        tb_writer.add_scalar('Task {}'.format(current_task), accs[-1],
                             epoch + 1)
        if epoch == args.epochs - 1:
            torch.save(
                model,
                os.path.join(
                    log_dir, 'task_' + str(current_task).zfill(2) +
                    '_%d_model.pkl' % epoch))


    ################ feature extraction training end #################

    ######################### GAN training ###########################
    model = model.eval()

    for p in model.parameters():  # set requires_grad to False
        p.requires_grad = False
    for p in generator.parameters():  # set requires_grad to True
        p.requires_grad = True
    for p in discriminator.parameters():
        p.requires_grad = True
    criterion_softmax = torch.nn.CrossEntropyLoss().to(device)
    if current_task != args.num_task:
        for epoch in range(args.epochs_gan):
            loss_log = {
                'D/loss': 0.0,
                'D/new_rf': 0.0,
                'D/new_lbls': 0.0,
                'D/new_gp': 0.0,
                'D/prev_rf': 0.0,
                'D/prev_lbls': 0.0,
                'D/prev_gp': 0.0,
                'D/loss_q': 0.0,
                'D/new_rf_q': 0.0,
                'D/new_lbls_q': 0.0,
                'D/new_gp_q': 0.0,
                'G/loss': 0.0,
                'G/new_rf': 0.0,
                'G/new_lbls': 0.0,
                'G/prev_rf': 0.0,
                'G/prev_mse': 0.0,
                'G/new_classifier': 0.0,
                'G/loss_q': 0.0,
                'G/new_rf_q': 0.0,
                'G/new_lbls_q': 0.0,
                'G/new_gp_q': 0.0,
                'E/kld': 0.0,
                'E/mse': 0.0,
                'E/loss': 0.0
            }

            for step, (x_spt, y_spt, x_qry,
                       y_qry) in enumerate(train_loader, 0):
                x_spt, y_spt, x_qry, y_qry = x_spt.to(device), y_spt.to(
                    device), x_qry.to(device), y_qry.to(device)

                BatchSize, setsz, c_, h, w = x_spt.size()
                querysz = x_qry.size(1)

                d_losses_q = [0.0 for _ in range(args.update_step)]
                g_losses_q = [0.0 for _ in range(args.update_step)]

                y_onehot = FloatTensor(setsz, args.num_class)
                y_onehot_q = FloatTensor(querysz, args.num_class)
                y_onehot_pre = FloatTensor(setsz, args.num_class)

                for i in range(args.BatchSize):  # inner loop over the meta-batch, not over tasks
                    inputs = Variable(x_spt[i])
                    labels = y_spt[i]

                    real_feat = model(inputs)
                    z = torch.Tensor(
                        np.random.normal(0, 1,
                                         (setsz, args.latent_dim))).to(device)

                    labels_q = y_qry[i]
                    real_feat_q = model(x_qry[i])

                    z_q = torch.Tensor(
                        np.random.normal(
                            0, 1, (querysz, args.latent_dim))).to(device)

                    y_onehot.zero_()
                    y_onehot.scatter_(1, labels[:, None], 1)
                    syn_label = y_onehot.to(device)
                    y_onehot_q.zero_()
                    y_onehot_q.scatter_(1, labels_q[:, None], 1)
                    syn_label_q = y_onehot_q.to(device)

                    ############################# Train MetaGAN ###########################

                    for k in range(args.update_step):

                        fake_feat = generator(z, syn_label)

                        fake_validity, disc_fake_acgan = discriminator(
                            fake_feat, syn_label)
                        real_validity, disc_real_acgan = discriminator(
                            real_feat, syn_label)

                        if current_task == 0:
                            loss_aug = 0 * torch.sum(fake_validity)
                        else:
                            ind = list(range(len(pre_index)))
                            embed_label_sythesis = []
                            for _ in range(setsz):
                                np.random.shuffle(ind)
                                embed_label_sythesis.append(pre_index[ind[0]])

                            embed_label_sythesis = np.asarray(
                                embed_label_sythesis)
                            embed_label_sythesis = torch.from_numpy(
                                embed_label_sythesis)
                            y_onehot_pre.zero_()
                            y_onehot_pre.scatter_(
                                1, embed_label_sythesis[:, None].to(device), 1)
                            syn_label_pre = y_onehot_pre.to(device)

                            pre_feat = generator(z, syn_label_pre)
                            pre_feat_old = generator_old(z, syn_label_pre)
                            loss_aug = loss_mse(pre_feat, pre_feat_old)

                        # Adversarial loss (wasserstein)

                        g_loss_lbls = criterion_softmax(
                            disc_fake_acgan, labels.to(device))
                        d_loss_rf = -torch.mean(real_validity) + torch.mean(
                            fake_validity)
                        d_gradient_penalty = compute_gradient_penalty(
                            discriminator, real_feat, fake_feat,
                            syn_label).mean()
                        d_loss_lbls = criterion_softmax(
                            disc_real_acgan, labels.to(device))
                        d_loss = d_loss_rf + lambda_gp * d_gradient_penalty + 0.5 * (
                            d_loss_lbls + g_loss_lbls)

                        g_loss_rf = -torch.mean(fake_validity)
                        g_loss = g_loss_rf + lambda_lwf * old_task_factor * loss_aug + g_loss_lbls

                        grad_d = torch.autograd.grad(
                            d_loss,
                            discriminator.parameters(),
                            create_graph=True,
                            retain_graph=True)
                        grad_g = torch.autograd.grad(g_loss,
                                                     generator.parameters(),
                                                     create_graph=True,
                                                     retain_graph=True)

                        grad_d = clip_grad_by_norm_(grad_d, max_norm=5.0)
                        grad_g = clip_grad_by_norm_(grad_g, max_norm=5)

                        g_lr, d_lr = learned_lrs[k]

                        for e, param in enumerate(discriminator.parameters()):
                            param.data = param.data.clone() - d_lr[e] * grad_d[e]
                        for e, param in enumerate(generator.parameters()):
                            param.data = param.data.clone() - g_lr[e] * grad_g[e]

                        fake_feat_q = generator(z_q, syn_label_q)
                        fake_validity_q, disc_fake_acgan_q = discriminator(
                            fake_feat_q, syn_label_q)
                        real_validity_q, disc_real_acgan_q = discriminator(
                            real_feat_q, syn_label_q)

                        # Adversarial loss query
                        d_loss_rf_q = -torch.mean(
                            real_validity_q) + torch.mean(fake_validity_q)
                        d_gradient_penalty_q = compute_gradient_penalty(
                            discriminator, real_feat_q, fake_feat_q,
                            syn_label_q).mean()
                        d_loss_lbls_q = criterion_softmax(
                            disc_real_acgan_q, labels_q.to(device))
                        d_loss_q = d_loss_rf_q + lambda_gp * d_gradient_penalty_q + d_loss_lbls_q
                        d_losses_q[k] = d_losses_q[k] + d_loss_q

                        g_loss_rf_q = -torch.mean(fake_validity_q)
                        g_loss_lbls_q = criterion_softmax(
                            disc_fake_acgan_q, labels_q.to(device))
                        g_loss_q = g_loss_rf_q + g_loss_lbls_q  # + lambda_lwf*old_task_factor * loss_aug_q
                        g_losses_q[k] = g_losses_q[k] + g_loss_lbls_q

                #with torch.autograd.detect_anomaly():
                optimizer_D.zero_grad()
                optimizer_G.zero_grad()
                optimizer_lr.zero_grad()
                d_loss_q_total = d_losses_q[-1].clone() / args.BatchSize
                g_loss_q_total = g_losses_q[-1].clone() / args.BatchSize
                d_loss_q_total.backward()
                g_loss_q_total.backward()
                torch.nn.utils.clip_grad_norm_(discriminator.parameters(), 5)
                torch.nn.utils.clip_grad_norm_(generator.parameters(), 5)
                optimizer_D.step()
                optimizer_G.step()
                optimizer_lr.step()
                scheduler_G.step()
                scheduler_D.step()

                loss_log['D/loss'] += d_loss.item()
                loss_log['D/new_rf'] += d_loss_rf.item()
                loss_log['D/new_lbls'] += d_loss_lbls.item()
                loss_log['D/new_gp'] += (d_gradient_penalty.item()
                                         if lambda_gp != 0 else 0)
                loss_log['D/loss_q'] += d_loss_q_total.item()
                #loss_log['D/new_rf_q'] += d_loss_rf_q.item()
                #loss_log['D/new_lbls_q'] += d_loss_lbls_q.item() #!!!
                #loss_log['D/new_gp_q'] += d_gradient_penalty_q.item() if lambda_gp != 0 else 0
                del d_loss_rf, d_loss_lbls

                loss_log['G/loss'] += g_loss.item()
                loss_log['G/new_rf'] += g_loss_rf.item()
                loss_log['G/new_lbls'] += g_loss_lbls.item()
                loss_log['G/loss_q'] += g_loss_q_total.item()
                #loss_log['G/new_rf_q'] += g_loss_rf_q.item()
                #loss_log['G/new_lbls_q'] += g_loss_lbls_q.item() #!!!
                #loss_log['G/new_classifier'] += 0 #!
                loss_log['G/prev_mse'] += (loss_aug.item()
                                           if lambda_lwf != 0 else 0)

                del g_loss_rf, g_loss_lbls

            print(
                '[GAN Epoch %05d]\t D Total Loss: %.3f \t G Total Loss: %.3f \t LwF Loss: %.3f'
                % (epoch + 1, loss_log['D/loss'], loss_log['G/loss'],
                   loss_log['G/prev_mse']))
            print(
                '[GAN Epoch %05d]\t D Total Loss Query: %.3f \t G Total Loss Query: %.3f \t'
                % (epoch + 1, loss_log['D/loss_q'], loss_log['G/loss_q']))
            for k, v in loss_log.items():
                if v != 0:
                    tb_writer.add_scalar(
                        'Task {} - GAN/{}'.format(current_task, k), v,
                        epoch + 1)

            if epoch == args.epochs_gan - 1:
                torch.save(
                    generator,
                    os.path.join(
                        log_dir, 'task_' + str(current_task).zfill(2) +
                        '_%d_model_generator.pkl' % epoch))
                torch.save(
                    discriminator,
                    os.path.join(
                        log_dir, 'task_' + str(current_task).zfill(2) +
                        '_%d_model_discriminator.pkl' % epoch))
    tb_writer.close()

    prototype = compute_prototype(model,
                                  train_loader,
                                  batch_size=args.BatchSize)
    return prototype
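compute_prototype is called above but not defined in this snippet. A minimal sketch, assuming it returns one mean embedding per class label (the tuple handling of the model output and the dict return type are assumptions):

def compute_prototype(model, train_loader, batch_size=None):
    # Sketch, not the author's implementation: average the embedding of
    # every training sample per class to get one prototype per label.
    model.eval()
    sums, counts = {}, {}
    with torch.no_grad():
        for inputs, labels in train_loader:
            feats = model(inputs.cuda())
            if isinstance(feats, tuple):  # some models return (logits, embedding)
                feats = feats[-1]
            for feat, label in zip(feats, labels):
                key = int(label)
                sums[key] = sums.get(key, 0) + feat.cpu()
                counts[key] = counts.get(key, 0) + 1
    return {k: sums[k] / counts[k] for k in sums}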
Ejemplo n.º 20
def main(args):
    # s_ = time.time()

    save_dir = args.save_dir
    mkdir_if_missing(save_dir)

    sys.stdout = logging.Logger(os.path.join(save_dir, 'log.txt'))
    display(args)
    start = 0

    model = models.create(args.net, pretrained=True, dim=args.dim)

    # for vgg and densenet
    if args.resume is None:
        model_dict = model.state_dict()

    else:
        # resume model
        print('load model from {}'.format(args.resume))
        chk_pt = load_checkpoint(args.resume)
        weight = chk_pt['state_dict']
        start = chk_pt['epoch']
        model.load_state_dict(weight)

    model = torch.nn.DataParallel(model)
    model = model.cuda()

    # freeze BN
    if args.freeze_BN is True:
        print(40 * '#', '\n BatchNorm frozen')
        model.apply(set_bn_eval)
    else:
        print(40 * '#', 'BatchNorm NOT frozen')

    # Fine-tune the model: freeze pre-trained parameters at first (lr_mult 0),
    # then unfreeze them at 1/10 of the new-parameter learning rate after epoch 1
    new_param_ids = set(map(id, model.module.classifier.parameters()))

    new_params = [
        p for p in model.module.parameters() if id(p) in new_param_ids
    ]

    base_params = [
        p for p in model.module.parameters() if id(p) not in new_param_ids
    ]

    param_groups = [{
        'params': base_params,
        'lr_mult': 0.0
    }, {
        'params': new_params,
        'lr_mult': 1.0
    }]

    print('initial model is saved at %s' % save_dir)

    optimizer = torch.optim.Adam(param_groups,
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)

    criterion = losses.create(args.loss,
                              margin=args.margin,
                              alpha=args.alpha,
                              base=args.loss_base).cuda()

    # Decor_loss = losses.create('decor').cuda()
    data = DataSet.create(args.data,
                          ratio=args.ratio,
                          width=args.width,
                          origin_width=args.origin_width,
                          root=args.data_root)

    train_loader = torch.utils.data.DataLoader(
        data.train,
        batch_size=args.batch_size,
        sampler=FastRandomIdentitySampler(data.train,
                                          num_instances=args.num_instances),
        drop_last=True,
        pin_memory=True,
        num_workers=args.nThreads)

    # save the train information

    for epoch in range(start, args.epochs):

        train(epoch=epoch,
              model=model,
              criterion=criterion,
              optimizer=optimizer,
              train_loader=train_loader,
              args=args)

        if epoch == 1:
            optimizer.param_groups[0]['lr_mult'] = 0.1

        if (epoch + 1) % args.save_step == 0 or epoch == 0:
            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()

            save_checkpoint({
                'state_dict': state_dict,
                'epoch': (epoch + 1),
            },
                            is_best=False,
                            fpath=osp.join(
                                args.save_dir,
                                'ckp_ep' + str(epoch + 1) + '.pth.tar'))
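set_bn_eval, applied through model.apply above, is not shown here; a plausible minimal version that freezes BatchNorm running statistics during fine-tuning:

def set_bn_eval(m):
    # Sketch: put every BatchNorm layer into eval mode so its running
    # mean/var stop updating while the rest of the network trains.
    if isinstance(m, torch.nn.modules.batchnorm._BatchNorm):
        m.eval()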
Ejemplo n.º 21
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print("Usage: python main.py result_sha(e.g., car_3d_det_test)")
        sys.exit(1)

    result_sha = sys.argv[1]
    save_root = '/home/eshan/AB3DMOT/results'

    det_id2str = {1: 'Pedestrian', 2: 'Car', 3: 'Cyclist'}
    seq_file_list, num_seq = load_list_from_folder(
        os.path.join('/home/eshan/AB3DMOT/data/KITTI', result_sha))
    total_time = 0.0
    total_frames = 0
    save_dir = os.path.join(save_root, result_sha)
    mkdir_if_missing(save_dir)
    eval_dir = os.path.join(save_dir, 'data')
    mkdir_if_missing(eval_dir)
    for seq_file in seq_file_list:
        _, seq_name, _ = fileparts(seq_file)
        mot_tracker = AB3DMOT()
        seq_dets = np.loadtxt(seq_file, delimiter=',')  # load detections
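        # column 0 of each detection row is assumed to be the frame index;
        # the per-frame loop below ranges from its min to its max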
        eval_file = os.path.join(eval_dir, seq_name + '.txt')
        eval_file = open(eval_file, 'w')
        save_trk_dir = os.path.join(save_dir, 'trk_withid', seq_name)
        mkdir_if_missing(save_trk_dir)
        print("Processing %s." % (seq_name))
        for frame in range(int(seq_dets[:, 0].min()),
                           int(seq_dets[:, 0].max()) + 1):
            save_trk_file = os.path.join(save_trk_dir, '%06d.txt' % frame)
            save_trk_file = open(save_trk_file, 'w')
Ejemplo n.º 22
def prepare_student_data(nb_teachers, save=False):
    """
    Takes a dataset name and the size of the teacher ensemble and prepares
    training data for the student model, according to parameters indicated
    in flags above.
    :param dataset: string corresponding to mnist, cifar10, or svhn
    :param nb_teachers: number of teachers (in the ensemble) to learn from
    :param save: if set to True, will dump student training labels predicted by
                 the ensemble of teachers (with Laplacian noise) as npy files.
                 It also dumps the clean votes for each class (without noise) and
                 the labels assigned by teachers
    :return: pairs of (data, labels) to be used for student training and testing

    """

    # Load the dataset
    if config.dataset == 'celeba':
        dataset = data_manager.init_img_dataset(root=config.data_dir, name=config.dataset)
        test_data = dataset.test_data
        test_labels = dataset.test_label
        train_data = dataset.train_data
        train_labels = dataset.train_label

    elif config.dataset == 'market':
        data_dir = '../dataset/market1501'
        train_dataset = Train_Dataset(data_dir,
                                      dataset_name=dataset_dict[config.dataset],
                                      train_val='train')
        test_dataset = Test_Dataset(data_dir,
                                    dataset_name=dataset_dict[config.dataset],
                                    query_gallery='gallery')

        train_data = train_dataset.train_data
        train_labels = train_dataset.train_label
        test_data = test_dataset.data
        test_labels = test_dataset.label
        train_labels = np.array(train_labels, dtype=np.int32)
        test_labels = np.array(test_labels, dtype=np.int32)
        print('len of total test data in market:', len(test_labels))
    else:
        return False

    # Make sure there is data leftover to be used as a test set
    assert config.stdnt_share < len(test_data)

    ori_test_data = test_data  # keep the raw records for the student split

    train_data, test_data = extract_feature(train_data, test_data)

    # Sample stdnt_share student records at random, keeping the last 1000
    # test records as a holdout for evaluation
    share_index = np.random.choice(test_data[:-1000].shape[0], config.stdnt_share)
    stdnt_data = test_data[share_index]
    picked_stdnt_data = [ori_test_data[idx] for idx in share_index]
    num_train = train_data.shape[0]
    teachers_preds = np.zeros([stdnt_data.shape[0], config.nb_labels])
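    # Private-kNN style aggregation: for each student query, a Poisson
    # subsample of the training set is eligible, its nb_teachers nearest
    # neighbors act as "teachers", and their label vectors are summed into
    # a vote histogram below.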

    tau_teachers_preds = []
    # a weighted teacher prediction with clipping
    for idx in range(len(stdnt_data)):
        if idx % 100 == 0:
            print('idx=', idx)
        query_data = stdnt_data[idx]
        select_teacher = np.random.choice(train_data.shape[0], int(prob * num_train))
        dis = np.linalg.norm(train_data[select_teacher] - query_data, axis=1)
        k_index = select_teacher[np.argsort(dis)[:config.nb_teachers]]
        # sum over the teachers, which makes it easy to compute their votes
        if config.use_tau:
            tau_teachers_preds.append(tau_limit(train_labels[k_index,:]))
        teachers_preds[idx] = np.sum(train_labels[k_index, :], axis=0)


    teachers_preds = np.asarray(teachers_preds, dtype=np.int32)
    if config.use_tau:
        preds_tau = np.asarray(tau_teachers_preds, dtype=np.float32)
        acct.compose_poisson_subsampled_mechanisms(gaussian, prob,
                                                   coeff=config.stdnt_share)
        count_zero_list = config.nb_teachers * np.ones(
            [config.stdnt_share, config.nb_labels]) - teachers_preds
        idx, stdnt_labels = aggregation.aggregation_knn(
            teachers_preds, config.gau_scale, count_zero_list=count_zero_list)
    else:
        acct.compose_poisson_subsampled_mechanisms(gaussian, prob,
                                                   coeff=config.stdnt_share)
        idx, stdnt_labels = aggregation.aggregation_knn(teachers_preds,
                                                        config.gau_scale)
    # compute privacy loss
    print("Composition of student  subsampled Gaussian mechanisms gives ", (acct.get_eps(delta), delta))

    # Print accuracy of aggregated label
    #ac_ag_labels = hamming_accuracy(stdnt_labels, test_labels[:config.stdnt_share], torch=False)
    ac_ag_labels = hamming_accuracy(stdnt_labels, test_labels[share_index], torch=False)
    precision = hamming_precision(stdnt_labels, test_labels[share_index], torch=False)
    print("Accuracy of the aggregated labels: " + str(ac_ag_labels))
    print('precision of the aggregated labels'+str(precision))
    current_eps = acct.get_eps(config.delta)
    # Store unused part of test set for use as a test set after student training
    stdnt_test_data = ori_test_data[-1000:]
    stdnt_test_labels = test_labels[-1000:]

    if save:
        # Prepare filepath for numpy dump of labels produced by noisy aggregation
        dir_path = os.path.join(config.save_model,
                                'knn_num_neighbor_' + str(config.nb_teachers))
        utils.mkdir_if_missing(dir_path)
        filepath = dir_path + '/_knn_voting.npy'

        # Dump student noisy labels array
        with open(filepath, 'wb') as file_obj:
            np.save(file_obj, teachers_preds)

    return picked_stdnt_data, stdnt_labels, stdnt_test_data, stdnt_test_labels
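tau_limit is used above without a definition. In clipped private-kNN aggregation it bounds each neighbor's vote vector before summing, capping any single record's influence on the histogram. A hedged sketch; the L2-norm clipping and the config.tau parameter are assumptions:

def tau_limit(votes):
    # Sketch under assumptions: rescale each teacher's multi-label vote
    # vector to L2 norm at most config.tau, then sum over teachers.
    votes = np.asarray(votes, dtype=np.float32)
    norms = np.maximum(np.linalg.norm(votes, axis=1, keepdims=True), 1e-12)
    clipped = votes * np.minimum(1.0, config.tau / norms)
    return clipped.sum(axis=0)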
Ejemplo n.º 23
def dataset_mpc(dataset, imset, seed=1, n=100, out_dir=None, crop_size=80):
    # Load the building
    d = sbpd.get_dataset(dataset, imset)
    ns = d.get_imset()

    camera_param = utils.Foo(width=225,
                             height=225,
                             z_near=0.01,
                             z_far=20.0,
                             fov_horizontal=60.,
                             fov_vertical=60.,
                             modalities=['rgb'],
                             img_channels=3,
                             im_resize=1.)
    r_obj = sr.get_r_obj(camera_param)

    for name in ns:
        for flip in [True, False]:

            tt = d.load_data(name, flip)
            tt.set_r_obj(r_obj)
            tt.load_building_into_scene()

            traversible_cc, resolution = tt.traversible, tt.map.resolution
            traversible_map = tt.map.traversible * 1.
            starts, ends = mu.sample_positions_on_map(seed, traversible_cc,
                                                      resolution, n)

            # Crop out map from this location.
            loc = starts * 1.
            diff = ends - starts
            theta = np.arctan2(diff[:, 1], diff[:, 0])[:, np.newaxis]
            x_axis = np.concatenate([np.cos(theta), np.sin(theta)], axis=1)
            y_axis = np.concatenate(
                [np.cos(theta + np.pi / 2.),
                 np.sin(theta + np.pi / 2.)],
                axis=1)
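            # (x_axis, y_axis) span an egocentric frame aligned with the
            # start->goal heading; generate_egocentric_maps uses it to crop
            # an oriented local map around each start position.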
            crops = mu.generate_egocentric_maps([traversible_map], [1.0],
                                                [crop_size],
                                                starts,
                                                x_axis,
                                                y_axis,
                                                dst_theta=np.pi / 2.0)

            # Render out images for each location.
            nodes = np.concatenate([loc, theta / tt.robot.delta_theta], axis=1)
            imgs = tt.render_nodes(nodes)
            r_obj.clear_scene()

            # Write the maps and the images in a directory.
            if out_dir is not None:
                out_dir_ = os.path.join(
                    out_dir, '{:d}'.format(seed),
                    '{:s}_{:s}_{:d}'.format(dataset, name, flip))
                logging.error('out_dir: %s', out_dir_)
                utils.mkdir_if_missing(out_dir_)
                for i in range(n):
                    file_name = os.path.join(out_dir_,
                                             'img_{:06d}.jpg'.format(i))
                    cv2.imwrite(file_name,
                                imgs[i][:, :, ::-1].astype(np.uint8))
                    file_name = os.path.join(out_dir_,
                                             'map_{:06d}.png'.format(i))
                    cv2.imwrite(file_name,
                                (crops[0][i, :, :] * 255).astype(np.uint8))

                out_file_name = os.path.join(
                    out_dir, 'maps',
                    '{:s}_{:s}_{:d}.pkl'.format(dataset, name, flip))
                vv = vars(tt.map)
                vv['traversible_cc'] = tt.traversible
                # save_variables(out_file_name, list(vv.values()), list(vv.keys()), True)

                output_file = os.path.join(out_dir_, 'vis.html')
                create_webpage(output_file, n)
Ejemplo n.º 24
def main(args):
    s_ = time.time()

    #  Save training logs
    log_dir = args.log_dir
    mkdir_if_missing(log_dir)

    sys.stdout = logging.Logger(os.path.join(log_dir, 'log.txt'))
    display(args)

    if args.r is None:
        model = models.create(args.net)
        model = load_parameter(model)

    else:
        # resume model
        print('Resume from model at Epoch %d' % args.start)
        model = torch.load(args.r)

    model = model.cuda()
    torch.save(model, os.path.join(log_dir, 'model.pkl'))
    print('initial model is saved at %s' % log_dir)
    # fine-tune the model: freeze pre-trained parameters at first (lr_mult 0),
    # then unfreeze them at 1/10 of the new-parameter learning rate after epoch 2
    new_param_ids = set.union(
        set(map(id, model.Embedding.parameters())),
        set(map(id, model.attention_blocks.parameters())))

    new_params = [p for p in model.parameters() if id(p) in new_param_ids]

    base_params = [p for p in model.parameters() if id(p) not in new_param_ids]
    param_groups = [{
        'params': base_params,
        'lr_mult': 0.0
    }, {
        'params': new_params,
        'lr_mult': 1.0
    }]
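    # 'lr_mult' is a custom per-group key (stored but unused by Adam itself),
    # presumably consumed by an external LR helper: base layers start frozen
    # (0.0) and are unfrozen to 0.1 at epoch 2 below.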

    optimizer = torch.optim.Adam(param_groups,
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)

    if args.loss == 'bin':
        criterion = losses.create(args.loss,
                                  margin=args.margin,
                                  alpha=args.alpha).cuda()
        Div = losses.create('div').cuda()
    else:
        criterion = losses.create(args.loss).cuda()

    data = DataSet.create(args.data, root=None)
    train_loader = torch.utils.data.DataLoader(
        data.train,
        batch_size=args.BatchSize,
        sampler=RandomIdentitySampler(data.train,
                                      num_instances=args.num_instances),
        drop_last=True,
        num_workers=args.nThreads)

    # save the train information
    epoch_list = list()
    loss_list = list()
    pos_list = list()
    neg_list = list()

    for epoch in range(args.start, args.epochs):
        epoch_list.append(epoch)

        running_loss = 0.0
        divergence = 0.0
        running_pos = 0.0
        running_neg = 0.0

        if epoch == 2:
            param_groups[0]['lr_mult'] = 0.1

        for i, data in enumerate(train_loader, 0):
            inputs, labels = data
            # wrap them in Variable
            inputs = Variable(inputs.cuda())

            # type of labels is Variable cuda.Longtensor
            labels = Variable(labels).cuda()

            optimizer.zero_grad()

            embed_feat = model(inputs)

            loss, inter_, dist_ap, dist_an = criterion(embed_feat, labels)
            div = Div(embed_feat)

            loss_ = loss + args.theta * div
            if not isinstance(loss, torch.Tensor):
                print('Loss is not a Tensor this iteration; skipping backward')
                continue

            loss_.backward()
            optimizer.step()

            running_loss += loss.item()
            divergence += div.item()
            running_neg += dist_an
            running_pos += dist_ap

            if epoch == 0 and i == 0:
                print(50 * '#')
                print('Train Begin -- HA-HA-HA-HA-AH-AH-AH-AH --')

        loss_list.append(running_loss)
        pos_list.append(running_pos / i)
        neg_list.append(running_neg / i)

        print(
            '[Epoch %05d]\t Loss: %.2f \t Divergence: %.2f \t Accuracy: %.2f \t Pos-Dist: %.2f \t Neg-Dist: %.2f'
            % (epoch + 1, running_loss, divergence, inter_, dist_ap, dist_an))

        if epoch % args.save_step == 0:
            torch.save(model, os.path.join(log_dir, '%d_model.pkl' % epoch))
    np.savez(os.path.join(log_dir, "result.npz"),
             epoch=epoch_list,
             loss=loss_list,
             pos=pos_list,
             neg=neg_list)
    t = time.time() - s_
    print('training takes %.2f hour' % (t / 3600))
Ejemplo n.º 25
parser.add_argument('-orth_cof',
                    type=float,
                    default=0e-3,
                    help='try to make the last linear weight matrix '
                    'approximate an orthogonal matrix')

args = parser.parse_args()

print(args.nums)
print(type(args.nums))

if args.log_dir is None:
    log_dir = os.path.join('checkpoints', args.loss)
else:
    log_dir = os.path.join('checkpoints', args.log_dir)
mkdir_if_missing(log_dir)
# write log
sys.stdout = logging.Logger(os.path.join(log_dir, 'log.txt'))

#  display information of current training
print('train on dataset %s' % args.data)
print('batch size is: %d' % args.BatchSize)
print('num_instance is %d' % args.num_instances)
print('dimension of the embedding space is %d' % args.dim)
print('log dir is: %s' % log_dir)
print('the network is: %s' % args.net)
print('loss function for training is: %s' % args.loss)
print('learning rate: %f' % args.lr)
print('learning rate for base parameters: %f' % args.base)
print('the orthogonal weight regularization coefficient is %f' % args.orth_cof)
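The -orth_cof flag suggests an orthogonality regularizer on the last linear layer; a later example calls it as orth_reg(model, loss, cof=...). A minimal sketch of the usual Frobenius penalty ||W W^T - I||^2; the attribute path to the weight matrix is a placeholder:

def orth_reg(model, loss, cof=1e-3):
    # Sketch: push the rows of the final linear weight toward an
    # orthonormal set by penalizing ||W W^T - I||_F^2.
    W = model.Embed.linear.weight  # hypothetical location of the last linear layer
    gram = torch.mm(W, W.t())
    eye = torch.eye(gram.size(0), device=gram.device)
    return loss + cof * torch.sum((gram - eye) ** 2)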
Ejemplo n.º 26
def prepare_student_data(nb_teachers, save=False):
    """
  Takes a dataset name and the size of the teacher ensemble and prepares
  training data for the student model, according to parameters indicated
  in flags above.
  :param dataset: string corresponding to mnist, cifar10, or svhn
  :param nb_teachers: number of teachers (in the ensemble) to learn from
  :param save: if set to True, will dump student training labels predicted by
               the ensemble of teachers (with Laplacian noise) as npy files.
               It also dumps the clean votes for each class (without noise) and
               the labels assigned by teachers
  :return: pairs of (data, labels) to be used for student training and testing
  """

    # Load the dataset
    if config.dataset == 'celeba':
        dataset = data_manager.init_img_dataset(root=config.data_dir,
                                                name=config.dataset)
        test_data = dataset.test_data
        test_labels = dataset.test_label

    elif config.dataset == 'market':
        data_dir = '../dataset/market1501'
        test_dataset = Test_Dataset(data_dir,
                                    dataset_name=dataset_dict[config.dataset],
                                    query_gallery='gallery')
        test_data = test_dataset.data
        test_labels = test_dataset.label
        test_labels = np.array(test_labels, dtype=np.int32)
    else:
        print("Check value of dataset flag")
        return False

    # Make sure there is data leftover to be used as a test set
    assert config.stdnt_share < len(test_data)

    # Prepare [unlabeled] student training data (subset of test set)
    stdnt_data = test_data[:config.stdnt_share]
    # Compute teacher predictions for student training data
    if config.reuse_vote:
        # reuse previously saved clean votes; stdnt_share may vary
        # dir_path = os.path.join(config.save_model, 'pate_' + str(config.nb_teachers))
        dir_path = os.path.join(config.save_model, config.dataset)
        dir_path = os.path.join(dir_path,
                                'pate_num_teacher_' + str(config.nb_teachers))
        utils.mkdir_if_missing(dir_path)
        filepath = dir_path + '/_teacher_votes.npy'
        # Prepare filepath for numpy dump of labels produced by noisy aggregation
        teachers_preds = np.load(filepath)
        teachers_preds = teachers_preds[:config.stdnt_share]
        ori_filepath = dir_path + '_ori_teacher_votes.npy'
        ori_teachers_preds = np.load(ori_filepath)
    else:
        teachers_preds = ensemble_preds(nb_teachers, stdnt_data)
        ori_teachers_preds = teachers_preds  # in the shape of (nb_teacher, nb_data, dim)
        teachers_preds = np.sum(teachers_preds, axis=0)
        dir_path = os.path.join(config.save_model, config.dataset)
        dir_path = os.path.join(dir_path,
                                'pate_num_teacher_' + str(config.nb_teachers))
        utils.mkdir_if_missing(dir_path)
        filepath = dir_path + '/_teacher_votes.npy'
        ori_filepath = dir_path + '_ori_teacher_votes.npy'
        with open(filepath, mode='wb') as file_obj:
            np.save(file_obj, teachers_preds)
        with open(ori_filepath, mode='wb') as file_obj:
            np.save(file_obj, ori_teachers_preds)

    if config.use_tau:
        tau_teachers_preds = np.zeros(teachers_preds.shape)
        for idx in range(len(tau_teachers_preds)):
            tau_teachers_preds[idx] = tau_limit(ori_teachers_preds[:, idx, :])

        preds_tau = np.asarray(tau_teachers_preds, dtype=np.float32)
        print('preds_tau', preds_tau[1, ])
        count_zero_list = config.nb_teachers * np.ones(
            [config.stdnt_share, config.nb_labels]) - teachers_preds
        print('shape of count_zero', count_zero_list.shape)
        idx, stdnt_labels = aggregation.aggregation_knn(
            teachers_preds, config.gau_scale, count_zero_list=count_zero_list)
        acct.compose_mechanism(gaussian, coeff=config.stdnt_share)
    else:
        acct.compose_mechanism(gaussian, coeff=config.stdnt_share)
        idx, stdnt_labels = aggregation.aggregation_knn(
            teachers_preds, config.gau_scale)
    print('shape of teachers_pred', teachers_preds.shape)
    # Aggregate teacher predictions to get student training labels

    # Print accuracy of aggregated label
    ac_ag_labels = hamming_accuracy(stdnt_labels,
                                    test_labels[:config.stdnt_share],
                                    torch=False)
    print("Accuracy of the aggregated labels: " + str(ac_ag_labels))
    current_eps = acct.get_eps(config.delta)
    print('eps after data independent composition', current_eps)
    # Store unused part of test set for use as a test set after student training
    stdnt_test_data = test_data[config.stdnt_share:]
    stdnt_test_labels = test_labels[config.stdnt_share:]

    return stdnt_data, stdnt_labels, stdnt_test_data, stdnt_test_labels
Ejemplo n.º 27
    '/Users/idofarhi/Documents/Thesis/Data/frames/' + folder_name + '/4CH'
}

# Test data:
# source_directory = {'apex': '/Users/idofarhi/Documents/Thesis/Data/test_set/frames/raw/apex',
#                     'mitral': '/Users/idofarhi/Documents/Thesis/Data/test_set/frames/raw/mitral',
#                     'papillary': '/Users/idofarhi/Documents/Thesis/Data/test_set/frames/raw/papillary'}
#
# target_directory = {'apex': '/Users/idofarhi/Documents/Thesis/Data/test_set/frames/' + folder_name + '/apex',
#                     'mitral': '/Users/idofarhi/Documents/Thesis/Data/test_set/frames/' + folder_name + '/mitral',
#                     'papillary': '/Users/idofarhi/Documents/Thesis/Data/test_set/frames/' + folder_name + '/papillary'}

#################################################

for folder in target_directory.values():
    mkdir_if_missing(folder)

video_list = {}
file_names = get_frame_sequences(source_directory['apex'][:-5],
                                 class_folders=view_list)
for view in view_list:
    video_list[view] = list(file_names[view].keys())

# for each view...
for view in view_list:
    # get a list of all files in the relevant view directory (file list)
    file_list = get_files_list(source_directory[view])
    print('Running view:', view)

    # run through each video...
    for video in tqdm(video_list[view]):
Ejemplo n.º 28
def main(args):

    #  Save training logs
    log_dir = os.path.join('checkpoints', args.log_dir)
    mkdir_if_missing(log_dir)

    sys.stdout = logging.Logger(os.path.join(log_dir, 'log.txt'))
    display(args)

    if args.r is None:
        model = models.create(args.net, Embed_dim=args.dim)
        # load part of the model
        model_dict = model.state_dict()
        # print(model_dict)
        if args.net == 'bn':
            pretrained_dict = torch.load(
                'pretrained_models/bn_inception-239d2248.pth')
        else:
            pretrained_dict = torch.load(
                'pretrained_models/inception_v3_google-1a9a5a14.pth')

        pretrained_dict = {
            k: v
            for k, v in pretrained_dict.items() if k in model_dict
        }
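        # keep only pretrained tensors whose names exist in this model, so
        # the freshly added embedding head keeps its random initialization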

        model_dict.update(pretrained_dict)

        model.load_state_dict(model_dict)
    else:
        # resume model
        model = torch.load(args.r)

    model = model.cuda()

    torch.save(model, os.path.join(log_dir, 'model.pkl'))
    print('initial model is saved at %s' % log_dir)

    # fine tune the model: the learning rate for pre-trained parameter is 1/10
    new_param_ids = set(map(id, model.Embed.parameters()))

    new_params = [p for p in model.parameters() if id(p) in new_param_ids]

    base_params = [p for p in model.parameters() if id(p) not in new_param_ids]
    param_groups = [{
        'params': base_params,
        'lr_mult': 0.1
    }, {
        'params': new_params,
        'lr_mult': 1.0
    }]

    optimizer = torch.optim.Adam(param_groups,
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)
    criterion = losses.create(args.loss, alpha=args.alpha, k=args.k).cuda()

    data = DataSet.create(args.data, root=None, test=False)
    train_loader = torch.utils.data.DataLoader(
        data.train,
        batch_size=args.BatchSize,
        sampler=RandomIdentitySampler(data.train,
                                      num_instances=args.num_instances),
        drop_last=True,
        num_workers=args.nThreads)

    for epoch in range(args.start, args.epochs):
        running_loss = 0.0
        for i, data in enumerate(train_loader, 0):
            inputs, labels = data
            # wrap them in Variable
            inputs = Variable(inputs.cuda())
            labels = Variable(labels).cuda()

            optimizer.zero_grad()

            embed_feat = model(inputs)

            loss, inter_, dist_ap, dist_an = criterion(embed_feat, labels)
            if args.orth > 0:
                loss = orth_reg(model, loss, cof=args.orth)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            if epoch == 0 and i == 0:
                print(50 * '#')
                print('Train Begin -- HA-HA-HA')

        print(
            '[Epoch %05d]\t Loss: %.3f \t Accuracy: %.3f \t Pos-Dist: %.3f \t Neg-Dist: %.3f'
            % (epoch + 1, running_loss, inter_, dist_ap, dist_an))

        if epoch % args.save_step == 0:
            torch.save(model, os.path.join(log_dir, '%d_model.pkl' % epoch))
Ejemplo n.º 29
    def _preprocess(self):
        """
        This function is a bit complex and ugly, what it does is
        1. Extract data from cuhk-03.mat and save as png images.
        2. Create 20 classic splits. (Li et al. CVPR'14)
        3. Create new split. (Zhong et al. CVPR'17)
        """
        print("Note: if root path is changed, the previously generated json files need to be re-generated (delete them first)")
        if osp.exists(self.imgs_labeled_dir) and \
           osp.exists(self.imgs_detected_dir) and \
           osp.exists(self.split_classic_det_json_path) and \
           osp.exists(self.split_classic_lab_json_path) and \
           osp.exists(self.split_new_det_json_path) and \
           osp.exists(self.split_new_lab_json_path):
            return

        mkdir_if_missing(self.imgs_detected_dir)
        mkdir_if_missing(self.imgs_labeled_dir)

        print("Extract image data from {} and save as png".format(self.raw_mat_path))
        mat = h5py.File(self.raw_mat_path, 'r')

        def _deref(ref):
            return mat[ref][:].T
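            # note: MATLAB stores arrays column-major, so the transpose above
            # restores the original row-major orientation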

        def _process_images(img_refs, campid, pid, save_dir):
            img_paths = [] # Note: some persons only have images for one view
            for imgid, img_ref in enumerate(img_refs):
                img = _deref(img_ref)
                # skip empty cell
                if img.size == 0 or img.ndim < 3: continue
                # images are saved with the following 1-based naming scheme (ensures uniqueness)
                # campid: index of camera pair (1-5)
                # pid: index of person in 'campid'-th camera pair
                # viewid: index of view, {1, 2}
                # imgid: index of image, (1-10)
                viewid = 1 if imgid < 5 else 2
                img_name = '{:01d}_{:03d}_{:01d}_{:02d}.png'.format(campid+1, pid+1, viewid, imgid+1)
                img_path = osp.join(save_dir, img_name)
                imsave(img_path, img)
                img_paths.append(img_path)
            return img_paths

        def _extract_img(name):
            print("Processing {} images (extract and save) ...".format(name))
            meta_data = []
            imgs_dir = self.imgs_detected_dir if name == 'detected' else self.imgs_labeled_dir
            for campid, camp_ref in enumerate(mat[name][0]):
                camp = _deref(camp_ref)
                num_pids = camp.shape[0]
                for pid in range(num_pids):
                    img_paths = _process_images(camp[pid,:], campid, pid, imgs_dir)
                    assert len(img_paths) > 0, "campid{}-pid{} has no images".format(campid, pid)
                    meta_data.append((campid+1, pid+1, img_paths))
                print("done camera pair {} with {} identities".format(campid+1, num_pids))
            return meta_data

        meta_detected = _extract_img('detected')
        meta_labeled = _extract_img('labeled')

        def _extract_classic_split(meta_data, test_split):
            train, test = [], []
            num_train_pids, num_test_pids = 0, 0
            num_train_imgs, num_test_imgs = 0, 0
            for i, (campid, pid, img_paths) in enumerate(meta_data):
                
                if [campid, pid] in test_split:
                    for img_path in img_paths:
                        camid = int(osp.basename(img_path).split('_')[2])
                        test.append((img_path, num_test_pids, camid))
                    num_test_pids += 1
                    num_test_imgs += len(img_paths)
                else:
                    for img_path in img_paths:
                        camid = int(osp.basename(img_path).split('_')[2])
                        train.append((img_path, num_train_pids, camid))
                    num_train_pids += 1
                    num_train_imgs += len(img_paths)
            return train, num_train_pids, num_train_imgs, test, num_test_pids, num_test_imgs

        print("Creating classic splits (# = 20) ...")
        splits_classic_det, splits_classic_lab = [], []
        for split_ref in mat['testsets'][0]:
            test_split = _deref(split_ref).tolist()

            # create split for detected images
            train, num_train_pids, num_train_imgs, test, num_test_pids, num_test_imgs = \
                _extract_classic_split(meta_detected, test_split)
            splits_classic_det.append({
                'train': train, 'query': test, 'gallery': test,
                'num_train_pids': num_train_pids, 'num_train_imgs': num_train_imgs,
                'num_query_pids': num_test_pids, 'num_query_imgs': num_test_imgs,
                'num_gallery_pids': num_test_pids, 'num_gallery_imgs': num_test_imgs,
            })

            # create split for labeled images
            train, num_train_pids, num_train_imgs, test, num_test_pids, num_test_imgs = \
                _extract_classic_split(meta_labeled, test_split)
            splits_classic_lab.append({
                'train': train, 'query': test, 'gallery': test,
                'num_train_pids': num_train_pids, 'num_train_imgs': num_train_imgs,
                'num_query_pids': num_test_pids, 'num_query_imgs': num_test_imgs,
                'num_gallery_pids': num_test_pids, 'num_gallery_imgs': num_test_imgs,
            })
        
        write_json(splits_classic_det, self.split_classic_det_json_path)
        write_json(splits_classic_lab, self.split_classic_lab_json_path)

        def _extract_set(filelist, pids, pid2label, idxs, img_dir, relabel):
            tmp_set = []
            unique_pids = set()
            for idx in idxs:
                img_name = filelist[idx][0]
                camid = int(img_name.split('_')[2])
                pid = pids[idx]
                if relabel: pid = pid2label[pid]
                img_path = osp.join(img_dir, img_name)
                tmp_set.append((img_path, int(pid), camid))
                unique_pids.add(pid)
            return tmp_set, len(unique_pids), len(idxs)

        def _extract_new_split(split_dict, img_dir):
            train_idxs = split_dict['train_idx'].flatten() - 1  # .mat indices are 1-based; shift to 0-based
            pids = split_dict['labels'].flatten()
            train_pids = set(pids[train_idxs])
            pid2label = {pid: label for label, pid in enumerate(train_pids)}
            query_idxs = split_dict['query_idx'].flatten() - 1
            gallery_idxs = split_dict['gallery_idx'].flatten() - 1
            filelist = split_dict['filelist'].flatten()
            train_info = _extract_set(filelist, pids, pid2label, train_idxs, img_dir, relabel=True)
            query_info = _extract_set(filelist, pids, pid2label, query_idxs, img_dir, relabel=False)
            gallery_info = _extract_set(filelist, pids, pid2label, gallery_idxs, img_dir, relabel=False)
            return train_info, query_info, gallery_info

        print("Creating new splits for detected images (767/700) ...")
        train_info, query_info, gallery_info = _extract_new_split(
            loadmat(self.split_new_det_mat_path),
            self.imgs_detected_dir,
        )
        splits = [{
            'train': train_info[0], 'query': query_info[0], 'gallery': gallery_info[0],
            'num_train_pids': train_info[1], 'num_train_imgs': train_info[2],
            'num_query_pids': query_info[1], 'num_query_imgs': query_info[2],
            'num_gallery_pids': gallery_info[1], 'num_gallery_imgs': gallery_info[2],
        }]
        write_json(splits, self.split_new_det_json_path)

        print("Creating new splits for labeled images (767/700) ...")
        train_info, query_info, gallery_info = _extract_new_split(
            loadmat(self.split_new_lab_mat_path),
            self.imgs_labeled_dir,
        )
        splits = [{
            'train': train_info[0], 'query': query_info[0], 'gallery': gallery_info[0],
            'num_train_pids': train_info[1], 'num_train_imgs': train_info[2],
            'num_query_pids': query_info[1], 'num_query_imgs': query_info[2],
            'num_gallery_pids': gallery_info[1], 'num_gallery_imgs': gallery_info[2],
        }]
        write_json(splits, self.split_new_lab_json_path)
Ejemplo n.º 30
def track_nuscenes(data_root: str,
                   detection_file: str,
                   save_dir: str,
                   eval_set: str = 'val',
                   covariance_id: int = 0,
                   match_distance: str = 'iou',
                   match_threshold: float = 0.1,
                   match_algorithm: str = 'h',
                   use_angular_velocity: bool = False):
    '''
  submission {
    "meta": {
        "use_camera":   <bool>  -- Whether this submission uses camera data as an input.
        "use_lidar":    <bool>  -- Whether this submission uses lidar data as an input.
        "use_radar":    <bool>  -- Whether this submission uses radar data as an input.
        "use_map":      <bool>  -- Whether this submission uses map data as an input.
        "use_external": <bool>  -- Whether this submission uses external data as an input.
    },
    "results": {
        sample_token <str>: List[sample_result] -- Maps each sample_token to a list of sample_results.
    }
  }
  
  '''
    if 'train' in eval_set:
        version = 'v1.0-trainval'
    elif 'val' in eval_set:
        version = 'v1.0-trainval'
    elif 'mini' in eval_set:
        version = 'v1.0-mini'
    elif 'test' in eval_set:
        version = 'v1.0-test'
    else:
        version = eval_set
        print("WARNING: Unknown subset version: '{}'".format(version))

    nusc = NuScenes(version=version, dataroot=data_root, verbose=True)
    mkdir_if_missing(save_dir)
    output_path = os.path.join(save_dir, 'probabilistic_tracking_results.json')

    results = {}

    total_time = 0.0
    total_frames = 0

    with open(detection_file) as f:
        data = json.load(f)
    assert 'results' in data, 'Error: No field `results` in result file. Please note that the result format changed. ' \
        'See https://www.nuscenes.org/object-detection for more information.'

    all_results = EvalBoxes.deserialize(data['results'], DetectionBox)
    meta = data['meta']
    print('meta: ', meta)
    print("Loaded results from {}. Found detections for {} samples.".format(
        detection_file, len(all_results.sample_tokens)))

    processed_scene_tokens = set()
    for sample_token_idx in tqdm(range(len(all_results.sample_tokens))):
        sample_token = all_results.sample_tokens[sample_token_idx]
        scene_token = nusc.get('sample', sample_token)['scene_token']
        if scene_token in processed_scene_tokens:
            continue
        first_sample_token = nusc.get('scene',
                                      scene_token)['first_sample_token']
        current_sample_token = first_sample_token

        mot_trackers = {
            tracking_name: AB3DMOT(covariance_id,
                                   tracking_name=tracking_name,
                                   use_angular_velocity=use_angular_velocity,
                                   tracking_nuscenes=True)
            for tracking_name in NUSCENES_TRACKING_NAMES
        }

        while current_sample_token != '':
            results[current_sample_token] = []
            dets = {
                tracking_name: []
                for tracking_name in NUSCENES_TRACKING_NAMES
            }
            info = {
                tracking_name: []
                for tracking_name in NUSCENES_TRACKING_NAMES
            }
            for box in all_results.boxes[current_sample_token]:
                if box.detection_name not in NUSCENES_TRACKING_NAMES:
                    continue
                q = Quaternion(box.rotation)
                angle = q.angle if q.axis[2] > 0 else -q.angle
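                # signed yaw about the z-axis: the quaternion axis can point
                # up or down for the same rotation, so axis[2] fixes the sign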
                #print('box.rotation,  angle, axis: ', box.rotation, q.angle, q.axis)
                #print('box.rotation,  angle, axis: ', q.angle, q.axis)
                #[h, w, l, x, y, z, rot_y]
                detection = np.array([
                    box.size[2], box.size[0], box.size[1], box.translation[0],
                    box.translation[1], box.translation[2], angle
                ])
                #print('detection: ', detection)
                information = np.array([box.detection_score])
                dets[box.detection_name].append(detection)
                info[box.detection_name].append(information)

            dets_all = {
                tracking_name: {
                    'dets': np.array(dets[tracking_name]),
                    'info': np.array(info[tracking_name])
                }
                for tracking_name in NUSCENES_TRACKING_NAMES
            }

            total_frames += 1
            start_time = time.time()
            for tracking_name in NUSCENES_TRACKING_NAMES:
                if dets_all[tracking_name]['dets'].shape[0] > 0:
                    trackers = mot_trackers[tracking_name].update(
                        dets_all[tracking_name], match_distance,
                        match_threshold, match_algorithm, scene_token)
                    # (N, 9)
                    # (h, w, l, x, y, z, rot_y), tracking_id, tracking_score
                    # print('trackers: ', trackers)
                    for i in range(trackers.shape[0]):
                        sample_result = format_sample_result(
                            current_sample_token, tracking_name, trackers[i])
                        results[current_sample_token].append(sample_result)
            cycle_time = time.time() - start_time
            total_time += cycle_time

            # get next frame and continue the while loop
            current_sample_token = nusc.get('sample',
                                            current_sample_token)['next']

        # left while loop and mark this scene as processed
        processed_scene_tokens.add(scene_token)

    # finished tracking all scenes, write output data
    output_data = {'meta': meta, 'results': results}
    with open(output_path, 'w') as outfile:
        json.dump(output_data, outfile)

    print("Total Tracking took: %.3f for %d frames or %.1f FPS" %
          (total_time, total_frames, total_frames / total_time))
Ejemplo n.º 31
        sys.exit(1)

    data_type = sys.argv[2]

    if data_type not in ('vehicle', 'pedestrian', 'cyclist'):
        print("Usage: python main.py data.bin vehicle")
        sys.exit(1)

    data_root = "./dataset"
    data_file_name = fileparts(sys.argv[1])[1] + fileparts(sys.argv[1])[2]

    result_root = './results'
    result_file_name = data_file_name[:-4] + "_preds.bin"

    data_file = os.path.join(data_root, data_file_name)
    mkdir_if_missing(result_root)

    dataset = metrics_pb2.Objects()

    with open(data_file, 'rb') as f:
        buf = f.read()
        dataset.ParseFromString(buf)

    contexts = []
    if len(dataset.objects) != 0:
        context = dataset.objects[0].context_name
        start = 0
        i = 1
        for data in dataset.objects[1:]:
            if data.context_name != context:
                contexts.append((start, i))
Ejemplo n.º 32
def train_fun(args, train_loader, feat_loader, current_task, fisher={}, prototype={}):

    log_dir = args.log_dir
    mkdir_if_missing(log_dir)

    sys.stdout = logging.Logger(os.path.join(log_dir, 'log.txt'))
    display(args)

    model = models.create(args.net, Embed_dim=args.dim)
    # load part of the model
    if args.method == 'Independent' or current_task == 0:
        model_dict = model.state_dict()

        if args.net == 'resnet32':
            pickle.load = partial(pickle.load, encoding="latin1")
            pickle.Unpickler = partial(pickle.Unpickler, encoding="latin1")
            pretrained_dict = torch.load(
                'pretrained_models/Finetuning_0_task_0_200_model_task2_cifar100_seed1993.pkl',
                map_location=lambda storage, loc: storage, pickle_module=pickle)
            pretrained_dict = pretrained_dict.state_dict()
            pretrained_dict = {k: v for k, v in pretrained_dict.items(
            ) if k in model_dict and 'fc' not in k}
            model_dict.update(pretrained_dict)
            model.load_state_dict(model_dict)

        elif args.net == 'resnet18' and args.data == 'imagenet_sub':
            pickle.load = partial(pickle.load, encoding="latin1")
            pickle.Unpickler = partial(pickle.Unpickler, encoding="latin1")
            pretrained_dict = torch.load(
                'pretrained_models/Finetuning_0_task_0_200_model_task2_imagenet_sub_seed1993.pkl',
                map_location=lambda storage, loc: storage, pickle_module=pickle)
            pretrained_dict = pretrained_dict.state_dict()
            pretrained_dict = {k: v for k, v in pretrained_dict.items(
            ) if k in model_dict and 'fc' not in k}
            model_dict.update(pretrained_dict)
            model.load_state_dict(model_dict)

        else:
            print('Oops! No valid pretrained model for this configuration.')

    if args.method != 'Independent' and current_task > 0:
        model = torch.load(os.path.join(log_dir, args.method + '_' + args.exp +
                                        '_task_' + str(current_task-1) + '_%d_model.pkl' % int(args.epochs-1)))
        model_old = deepcopy(model)
        model_old.eval()
        model_old = freeze_model(model_old)

    model = model.cuda()
    torch.save(model, os.path.join(log_dir, args.method + '_' +
                                   args.exp + '_task_' + str(current_task) + '_pre_model.pkl'))
    print('initial model is save at %s' % log_dir)

    # fine tune the model: the learning rate for pre-trained parameter is 1/10
    new_param_ids = set(map(id, model.Embed.parameters()))

    new_params = [p for p in model.parameters() if
                  id(p) in new_param_ids]

    base_params = [p for p in model.parameters() if
                   id(p) not in new_param_ids]
    param_groups = [
        {'params': base_params, 'lr_mult': 0.1},
        {'params': new_params, 'lr_mult': 1.0}]

    criterion = losses.create(args.loss, margin=args.margin, num_instances=args.num_instances).cuda()
    optimizer = torch.optim.Adam(
        param_groups, lr=args.lr, weight_decay=args.weight_decay)
    scheduler = StepLR(optimizer, step_size=200, gamma=0.1)

    if args.data == 'cifar100' or args.data == 'imagenet_sub':
        if current_task > 0:
            model.eval()

    for epoch in range(args.start, args.epochs):

        running_loss = 0.0
        running_lwf = 0.0
        scheduler.step()

        for i, data in enumerate(train_loader, 0):
            inputs, labels = data
            # wrap them in Variable
            inputs = Variable(inputs.cuda())
            labels = Variable(labels).cuda()
            optimizer.zero_grad()
            _, embed_feat = model(inputs)

            if current_task == 0:
                loss_aug = 0*torch.sum(embed_feat)
            else:
                if args.method == 'Finetuning' or args.method == 'Independent':
                    loss_aug = 0*torch.sum(embed_feat)
                elif args.method == 'LwF':
                    _, embed_feat_old = model_old(inputs)
                    loss_aug = args.tradeoff * \
                        torch.sum((embed_feat-embed_feat_old).pow(2))/2.
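                # EWC/MAS: penalize squared drift from the previous task's
                # parameters, weighted per-parameter by the importance
                # estimates in `fisher` (diagonal Fisher for EWC, gradient
                # magnitudes for MAS).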
                elif args.method == 'EWC' or args.method == 'MAS':
                    loss_aug = 0
                    for (name, param), (_, param_old) in zip(model.named_parameters(), model_old.named_parameters()):
                        loss_aug += args.tradeoff * \
                            torch.sum(fisher[name]*(param_old-param).pow(2))/2.

            if args.loss == 'MSLoss':
                loss = criterion(embed_feat, labels)
                inter_ = 0
                dist_ap = 0
                dist_an = 0
            else:
                loss, inter_, dist_ap, dist_an = criterion(embed_feat, labels)
            loss += loss_aug

            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            running_lwf += loss_aug.item()
            if epoch == 0 and i == 0:
                print(50*'#')
                print('Train Begin -- HA-HA-HA')

        print('[Epoch %05d]\t Total Loss: %.3f \t LwF Loss: %.3f \t Accuracy: %.3f \t Pos-Dist: %.3f \t Neg-Dist: %.3f'
              % (epoch + 1,  running_loss, running_lwf, inter_, dist_ap, dist_an))

        if epoch % args.save_step == 0:
            torch.save(model, os.path.join(log_dir, args.method + '_' +
                                           args.exp + '_task_' + str(current_task) + '_%d_model.pkl' % epoch))

    if args.method == 'EWC' or args.method == 'MAS':
        fisher = fisher_matrix_diag(
            model, criterion, train_loader, number_samples=500)
        return fisher
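fisher_matrix_diag is called for EWC/MAS but not defined in this snippet. A compact sketch of the standard diagonal-Fisher estimate, averaging squared loss gradients over a few hundred samples; reusing the metric criterion as the scoring loss is an assumption:

def fisher_matrix_diag(model, criterion, train_loader, number_samples=500):
    # Sketch: estimate E[(d loss / d theta)^2] per parameter; these are the
    # importance weights consumed by the EWC/MAS penalty in train_fun.
    fisher = {n: torch.zeros_like(p) for n, p in model.named_parameters()}
    model.eval()
    seen = 0
    for inputs, labels in train_loader:
        if seen >= number_samples:
            break
        model.zero_grad()
        _, embed_feat = model(inputs.cuda())
        out = criterion(embed_feat, labels.cuda())
        loss = out[0] if isinstance(out, tuple) else out
        loss.backward()
        for n, p in model.named_parameters():
            if p.grad is not None:
                fisher[n] += p.grad.detach() ** 2
        seen += inputs.size(0)
    return {n: f / max(seen, 1) for n, f in fisher.items()}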