Esempio n. 1
0
def count_and_cut(input_path, count):
    """Crop every annotated object out of a VOC-style dataset.

    Reads each XML under <input_path>/Annotations, crops each <object>'s
    bounding box out of the matching image in <input_path>/JPEGImages and
    saves it under <input_path>/Cutout/<class_name>/.

    count: nested dict count[class_name][img_name] -> int, incremented per
        cropped object and used to number the output files (mutated in place;
        keys are assumed to pre-exist — TODO confirm with the caller).
    """
    annot_path = os.path.join(input_path, "Annotations")
    img_path = os.path.join(input_path, "JPEGImages")
    save_folder = os.path.join(input_path, "Cutout")
    io_utils.mkdir(save_folder)
    io_utils.remove_all(save_folder)
    annots = [os.path.join(annot_path, s) for s in os.listdir(annot_path)]
    for annot in annots:
        try:
            root = ET.parse(annot).getroot()
            img_name = root.find('filename').text
            # Open the source image once per annotation instead of once per
            # object — the original re-opened the same file in the inner loop.
            img = Image.open(os.path.join(img_path, img_name))
            for element_obj in root.findall('object'):
                class_name = element_obj.find('name').text
                count[class_name][img_name] += 1
                save_path = os.path.join(save_folder, class_name)
                io_utils.mkdir(save_path)
                save_name = '{}-{}.jpg'.format(img_name.split('.')[0],
                                               count[class_name][img_name])
                bndbox = element_obj.find("bndbox")
                box = tuple(int(bndbox.find(tag).text)
                            for tag in ("xmin", "ymin", "xmax", "ymax"))
                img.crop(box).save(os.path.join(save_path, save_name))
        except Exception as e:
            print('Exception: {}'.format(e))
            continue
    def weight_data_set(self, data_set, cv_generator):
        """Return per-fold feature weights for *data_set*, using a disk cache.

        Loads precomputed weights if present; on a cache miss, generates them
        with self.generate (also materialising and caching the CV fold
        indices if they are missing) and saves them before returning.
        """
        super().weight_data_set(data_set, cv_generator)

        data, labels = DataSets.load(data_set)
        # cv_generator builds the CV splitter from the sample count.
        cv = cv_generator(labels.shape[0])

        try:
            # Fast path: weights were computed on a previous run.
            return PreComputedData.load(data_set, cv, "weight", self)
        except FileNotFoundError:

            print(
                "=> Generating feature {method}s of {data_set} ({cv}) with {feature_selector}"
                .format(method="weight",
                        data_set=data_set,
                        feature_selector=self.__name__,
                        cv=type(cv).__name__))

            try:
                cv_indices = PreComputedData.load_cv(data_set, cv)
            except FileNotFoundError:
                # First use of this CV split: materialise and cache the folds.
                mkdir(PreComputedData.cv_dir(data_set, cv))

                cv_indices = list(cv)
                np.save(PreComputedData.cv_file_name(data_set, cv), cv_indices)

            weights = self.generate(data, labels, cv_indices, "weight")
            self.__save(data_set, cv, "weight", weights)

            return weights
Esempio n. 3
0
def _compression_with_anno(dir, output):
    """Zip images together with their annotations, 200 pairs per archive.

    Archives are written to <output>/zip/<basename(output)>-<idx>.zip, each
    containing one batch of JPEGImages plus the matching Annotations batch.
    Does nothing when either argument is falsy.
    """
    if not (dir and output):
        return

    jpeg_dir = os.path.join(dir, 'JPEGImages')
    anno_dir = os.path.join(dir, 'Annotations')
    image_files = sorted(os.path.join(jpeg_dir, n) for n in os.listdir(jpeg_dir))
    anno_files = sorted(os.path.join(anno_dir, n) for n in os.listdir(anno_dir))

    zip_dir = os.path.join(output, 'zip')
    io_utils.mkdir(zip_dir)

    step = 200
    image_batches = [image_files[pos:pos + step]
                     for pos in range(0, len(image_files), step)]
    anno_batches = [anno_files[pos:pos + step]
                    for pos in range(0, len(anno_files), step)]

    base = os.path.basename(output)
    for idx, (img_chunk, anno_chunk) in enumerate(zip(image_batches,
                                                      anno_batches)):
        archive = os.path.join(zip_dir, '{}-{}.zip'.format(base, idx))
        zip_list(img_chunk, archive)
        # Append annotations into the same archive.
        zip_list(anno_chunk, archive, mode='a')
Esempio n. 4
0
def process(data):
    """Crop each labelled bounding box of one parsed image record to disk.

    data: dict with 'filepath', 'image_name' and 'bboxes' (each bbox has
    'class', 'x1', 'y1', 'x2', 'y2'). Crops are written under the module-level
    `save_path`, one sub-folder per class, named '<stem>-<occurrence>.jpg'.
    """
    src_img_path = data['filepath']
    stem = data['image_name'].split('.')[0]
    class_count = {}
    for bbox in data['bboxes']:
        label = bbox['class']
        # First occurrence of a label gets 0, later ones count upwards.
        class_count[label] = class_count.get(label, -1) + 1

        region = (bbox['x1'], bbox['y1'], bbox['x2'], bbox['y2'])
        bbox_name = "{}-{}.jpg".format(stem, class_count[label])

        label_dir = os.path.join(save_path, label)
        io_utils.mkdir(label_dir)

        crop_bbox(src_img_path, region, os.path.join(label_dir, bbox_name))
Esempio n. 5
0
def _copy_to_JPEGImages(parent_dir, src_dir):
    """Mirror every file from *src_dir* into a freshly emptied JPEGImages/."""
    target_dir = os.path.join(parent_dir, 'JPEGImages/')
    io_utils.mkdir(target_dir)
    # Clear any leftovers from a previous run before copying.
    io_utils.remove_all(target_dir)
    for entry in os.listdir(src_dir):
        io_utils.copy(os.path.join(src_dir, entry), target_dir)
    def weight_data_set(self, data_set, cv_generator):
        """Return per-fold feature weights for *data_set*, using a disk cache.

        Loads precomputed weights if present; on a cache miss, generates them
        with self.generate (also materialising and caching the CV fold
        indices if they are missing) and saves them before returning.
        """
        super().weight_data_set(data_set, cv_generator)

        data, labels = DataSets.load(data_set)
        # cv_generator builds the CV splitter from the sample count.
        cv = cv_generator(labels.shape[0])

        try:
            # Fast path: weights were computed on a previous run.
            return PreComputedData.load(data_set, cv, "weight", self)
        except FileNotFoundError:

            print("=> Generating feature {method}s of {data_set} ({cv}) with {feature_selector}".format(
                method="weight",
                data_set=data_set,
                feature_selector=self.__name__,
                cv=type(cv).__name__
            ))

            try:
                cv_indices = PreComputedData.load_cv(data_set, cv)
            except FileNotFoundError:
                # First use of this CV split: materialise and cache the folds.
                mkdir(PreComputedData.cv_dir(data_set, cv))

                cv_indices = list(cv)
                np.save(PreComputedData.cv_file_name(data_set, cv), cv_indices)

            weights = self.generate(data, labels, cv_indices, "weight")
            self.__save(data_set, cv, "weight", weights)

            return weights
Esempio n. 7
0
def _rot90(parent_dir, image_dir):
    """Copy images into '<image_dir>_with_rot90', rotating portrait ones.

    Any image taller than it is wide is rotated 90 degrees (counter-clockwise,
    np.rot90 convention) so every output is landscape. Returns the output
    directory path.
    """
    imgs_path = os.path.join(parent_dir, image_dir)
    imgs_out_path = os.path.join(parent_dir,
                                 '{}{}'.format(image_dir, '_with_rot90'))
    io_utils.delete_file_folder(imgs_out_path)
    io_utils.mkdir(imgs_out_path)

    for image_file in (os.path.join(imgs_path, s) for s in os.listdir(imgs_path)):
        try:
            img = cv2.imread(image_file)
            # shape is (rows, cols, channels): shape[0] is the HEIGHT.
            # The original named these variables the wrong way round.
            height, width = img.shape[0], img.shape[1]
            if height > width:  # portrait -> rotate to landscape
                image = np.rot90(img, 1).copy()
            else:
                image = img
            # os.path.basename handles every platform's separators;
            # the original split('/') broke on Windows paths.
            save_path = os.path.join(imgs_out_path, os.path.basename(image_file))

            cv2.imwrite(save_path, image)

            print(save_path)
        except Exception as e:
            print('Exception in pascal_voc_parser: {}'.format(e))
            continue

    return imgs_out_path
Esempio n. 8
0
def rename_image(data_path):
    """Ensure every annotation's <filename> element ends in '.jpg'.

    Scans <data_path>/Annotations; any <filename> without a dot is assumed to
    end in a bare 'jpg' suffix, which is replaced by a proper '.jpg' and the
    XML is rewritten. Also creates the JPEGImages_marked folder as a side
    effect.
    """
    if not os.path.isdir(data_path):
        print('input_path is not a dir: {}'.format(data_path))
        return

    annot_dir = os.path.join(data_path, 'Annotations')
    jpeg_dir = os.path.join(data_path, 'JPEGImages')
    marked_dir = os.path.join(data_path, 'JPEGImages_marked')
    io_utils.mkdir(marked_dir)

    for entry in os.listdir(annot_dir):
        annot = os.path.join(annot_dir, entry)
        try:
            tree = ET.parse(annot)
            node = tree.getroot().find('filename')
            stored_name = node.text
            if '.' not in stored_name:
                # Drop the trailing 3 chars (assumed bare 'jpg' — TODO
                # confirm) and re-append as a proper extension.
                node.text = stored_name[:-3] + '.' + 'jpg'
                print(annot)
                print(node.text)
                write_xml(tree, annot)

        except Exception as e:
            print('Exception in pascal_voc_parser: {}'.format(e))
            continue
Esempio n. 9
0
def benchmark_3d_vertex_save(params, img_names_list, method='', dense=True):
    """Reconstruct a vertex set for every parameter row and save it as .mat.

    params: array of 3DMM parameters, one row per image.
    img_names_list: '.jpg' image names aligned with the rows of params.
    method: sub-folder name under result/ to write into.
    dense: forwarded to reconstruct_vertex. The original hard-coded
        dense=True inside the loop, silently ignoring this argument.
    """
    save_path = 'result/' + method + '/'
    mkdir(save_path)
    for i in range(params.shape[0]):
        # Bug fix: honour the caller's `dense` flag instead of always True.
        lm = reconstruct_vertex(params[i], dense=dense)
        fn = img_names_list[i]
        wfp = osp.join(save_path, fn.replace('.jpg', '.mat'))
        print(wfp)
        sio.savemat(wfp, {'vertex': lm})
    def save_results(self, file_name="output.csv", append=False):
        """Write the results table as CSV under results/<subclass name>/.

        append: open the file in append mode instead of overwriting.
        """
        out_dir = DataSets.root_dir + "/results/" + type(self).__name__
        rows = self.results_table(self.row_labels, self.col_labels, self.results)

        mkdir(out_dir)

        mode = 'a' if append else 'w'
        with open(out_dir + "/" + file_name, mode) as sink:
            csv.writer(sink).writerows(rows)
Esempio n. 11
0
def rename_suffix_image(input_path):
    """Re-suffix every file in all_data/JPEGImages to '<stem>.jpg'.

    Renamed files land in a freshly recreated sibling folder
    'all_data/JPEGImages_rename'.
    """
    data_dir = os.path.join(input_path, 'all_data/JPEGImages')
    renamed_dir = '{}_rename'.format(data_dir)
    io_utils.delete_file_folder(renamed_dir)
    io_utils.mkdir(renamed_dir)
    for entry in os.listdir(data_dir):
        stem = entry.split('.')[0]
        io_utils.rename(os.path.join(data_dir, entry),
                        os.path.join(renamed_dir, '{}.jpg'.format(stem)))
Esempio n. 12
0
def un_zip(file_name, output):
    """Extract every member of the zip archive *file_name* into *output*.

    Creates *output* first if it does not already exist.
    """
    # os.makedirs replaces the project `mkdir` helper behind the original's
    # inverted `if isdir: pass else:` check.
    if not os.path.isdir(output):
        os.makedirs(output)
    # Context manager guarantees the archive is closed even if extraction
    # fails; the original leaked the handle on error. extractall() replaces
    # the manual per-name loop with the same result.
    with zipfile.ZipFile(file_name) as zip_file:
        zip_file.extractall(output)
Esempio n. 13
0
    def save_results(self, file_name="output.csv", append=False):
        """Write the results table as CSV under results/<subclass name>/.

        append: open the file in append mode instead of overwriting.
        """
        root_dir = DataSets.root_dir + "/results/" + type(self).__name__
        table = self.results_table(self.row_labels, self.col_labels,
                                   self.results)

        mkdir(root_dir)

        with open(root_dir + "/" + file_name, 'a' if append else 'w') as f:
            writer = csv.writer(f)
            writer.writerows(table)
    def save_results(self, filename=None):
        """Save the results array as .npy plus one label file per axis.

        Axis 0 holds data sets, axis 1 feature selectors, axis 2 measures.
        The file name defaults to the benchmark class's name.
        """
        if filename is None:
            filename = type(self.benchmark).__name__

        mkdir(self.root_dir)

        np.save("{}/{}.npy".format(self.root_dir, filename), self.results)

        dim_labels = [
            self.data_sets,
            [fs.__name__ for fs in self.feature_selectors],
            [measure.__name__ for measure in self.benchmark.get_measures()],
        ]
        for dim, labels in enumerate(dim_labels):
            self.__write_dim_info(filename, dim, labels)
Esempio n. 15
0
def parse_args():
    """Parse command-line options for 3DMM fitting training.

    Side effects: stores the parsed namespace in the module-level `args`
    global, converts the comma-separated --devices-id and --milestones
    strings into int lists, and creates the snapshot directory.
    """
    parser = argparse.ArgumentParser(description='3DMM Fitting')
    parser.add_argument('-j', '--workers', default=6, type=int)
    parser.add_argument('--epochs', default=40, type=int)
    parser.add_argument('--start-epoch', default=1, type=int)
    parser.add_argument('-b', '--batch-size', default=128, type=int)
    parser.add_argument('-vb', '--val-batch-size', default=32, type=int)
    parser.add_argument('--base-lr',
                        '--learning-rate',
                        default=0.001,
                        type=float)
    parser.add_argument('--momentum',
                        default=0.9,
                        type=float,
                        metavar='M',
                        help='momentum')
    parser.add_argument('--weight-decay', '--wd', default=5e-4, type=float)
    parser.add_argument('--print-freq', '-p', default=20, type=int)
    parser.add_argument('--resume', default='', type=str, metavar='PATH')
    # Comma-separated lists; converted to int lists after parsing below.
    parser.add_argument('--devices-id', default='0,1', type=str)
    parser.add_argument('--filelists-train', default='', type=str)
    parser.add_argument('--filelists-val', default='', type=str)
    parser.add_argument('--root', default='')
    parser.add_argument('--snapshot', default='', type=str)
    parser.add_argument('--log-file', default='output.log', type=str)
    parser.add_argument('--log-mode', default='w', type=str)
    parser.add_argument('--size-average', default='true', type=str2bool)
    parser.add_argument('--num-classes', default=62, type=int)
    parser.add_argument('--arch',
                        default='mobilenet_1',
                        type=str,
                        choices=arch_choices)
    parser.add_argument('--frozen', default='false', type=str2bool)
    parser.add_argument('--milestones', default='15,25,30', type=str)
    parser.add_argument('--task', default='all', type=str)
    parser.add_argument('--test_initial', default='false', type=str2bool)
    parser.add_argument('--warmup', default=-1, type=int)
    parser.add_argument('--param-fp-train', default='', type=str)
    parser.add_argument('--param-fp-val', default='')
    parser.add_argument('--opt-style', default='resample',
                        type=str)  # resample
    parser.add_argument('--resample-num', default=132, type=int)
    parser.add_argument('--loss', default='vdc', type=str)

    global args
    args = parser.parse_args()

    # Post-process the comma-separated string options into int lists.
    args.devices_id = [int(d) for d in args.devices_id.split(',')]
    args.milestones = [int(m) for m in args.milestones.split(',')]

    # Make sure the snapshot's parent directory exists before training starts.
    snapshot_dir = osp.split(args.snapshot)[0]
    mkdir(snapshot_dir)
Esempio n. 16
0
    def save_results(self, filename=None):
        """Save the results array as .npy plus one label file per axis.

        Axis 0 holds data sets, axis 1 feature selectors, axis 2 measures.
        The file name defaults to the benchmark class's name.
        """
        if filename is None:
            filename = type(self.benchmark).__name__

        mkdir(self.root_dir)

        np.save("{}/{}.npy".format(self.root_dir, filename), self.results)

        self.__write_dim_info(filename, 0, self.data_sets)
        self.__write_dim_info(filename, 1,
                              [f.__name__ for f in self.feature_selectors])
        self.__write_dim_info(
            filename, 2, [m.__name__ for m in self.benchmark.get_measures()])
Esempio n. 17
0
    def save_artificial(data, labels, feature_labels):
        """Overwrite the cached ARTIFICIAL dataset on disk.

        Deletes any precomputed data for "artificial" first, then writes the
        data/labels arrays (.npy) and feature labels (text, ints).
        """
        PreComputedData.delete("artificial")

        out_dir = DataSets.root_dir + "/ARTIFICIAL/ARTIFICIAL"
        io_utils.mkdir(out_dir)

        np.save(out_dir + "/artificial.data", data)
        np.save(out_dir + "/artificial.labels", labels)
        np.savetxt(out_dir + "/artificial_feat.labels", feature_labels, fmt='%d')
Esempio n. 18
0
def create_all_txts(data_dirs, tar_dir):
    """Merge the trainval/test splits of several datasets into *tar_dir*.

    Concatenates each dataset's trainval and test lists and writes them to
    <tar_dir>/ImageSets/Main/{trainval,test}.txt.
    """
    trainvals, tests = [], []
    for data_dir in data_dirs:
        trainvals += _read_trainval(data_dir)
        tests += _read_test(data_dir)

    io_utils.mkdir(os.path.join(tar_dir, 'ImageSets/Main'))

    _write_txt(os.path.join(tar_dir, 'ImageSets/Main/trainval.txt'), trainvals)
    _write_txt(os.path.join(tar_dir, 'ImageSets/Main/test.txt'), tests)
Esempio n. 19
0
def main():
    """Ensure the output folders exist (kept if present), then run the pipeline."""
    for sub in ('JPEGImages', 'Annotations'):
        folder = os.path.join(output, sub)
        # Existing folders are left untouched (contents preserved).
        if not os.path.exists(folder):
            io_utils.mkdir(folder)

    single_process()
Esempio n. 20
0
def _compression(dir, output):
    """Zip a dataset's images and annotations into <output>/<dirname>.zip.

    Images go in first, annotations are appended to the same archive.
    Does nothing when either argument is falsy.
    """
    if not (dir and output):
        return

    jpeg_dir = os.path.join(dir, 'JPEGImages')
    anno_dir = os.path.join(dir, 'Annotations')
    image_files = [os.path.join(jpeg_dir, n) for n in os.listdir(jpeg_dir)]
    anno_files = [os.path.join(anno_dir, n) for n in os.listdir(anno_dir)]

    io_utils.mkdir(output)

    archive = os.path.join(output, '{}.zip'.format(os.path.basename(dir)))
    zip_list(image_files, archive)
    zip_list(anno_files, archive, mode='a')
Esempio n. 21
0
def _rename_image(parent_dir, image_dir_name, str_date):
    """Copy every file of a directory under new 'train_<date>_<idx>.jpg' names.

    Files from <parent_dir>/<image_dir_name> are copied into a freshly
    recreated '<image_dir_name>_rename' sibling, numbered from 1000.
    str_date: date stamp embedded in each new name. Returns the new dir path.
    """
    data_dir = os.path.join(parent_dir, image_dir_name)
    data_rename_dir = os.path.join(parent_dir,
                                   '{}_rename'.format(image_dir_name))

    io_utils.delete_file_folder(data_rename_dir)
    io_utils.mkdir(data_rename_dir)
    prefix = 'train'
    # Removed a dead `datetime.datetime.now()` local — the date string is
    # supplied by the caller through `str_date`.
    for idx, entry in enumerate(os.listdir(data_dir), start=1000):
        old = os.path.join(data_dir, entry)
        new = os.path.join(data_rename_dir,
                           '{}_{}_{}.jpg'.format(prefix, str_date, idx))
        io_utils.copy(old, new)

    return data_rename_dir
Esempio n. 22
0
def _compression_without_anno(dir, output):
    """Zip only the JPEGImages of a dataset, 100 images per archive.

    Archives land in <output>/zip/<basename(output)>-<idx>.zip.
    Does nothing when either argument is falsy.
    """
    if not (dir and output):
        return

    jpeg_dir = os.path.join(dir, 'JPEGImages')
    image_files = sorted(os.path.join(jpeg_dir, n) for n in os.listdir(jpeg_dir))

    zip_dir = os.path.join(output, 'zip')
    io_utils.mkdir(zip_dir)

    step = 100
    base = os.path.basename(output)
    for idx, start in enumerate(range(0, len(image_files), step)):
        archive = os.path.join(zip_dir, '{}-{}.zip'.format(base, idx))
        zip_list(image_files[start:start + step], archive)
Esempio n. 23
0
def pick_marked(input_path):
    """Move every image referenced by an annotation into JPEGImages_marked.

    For each dataset folder (currently just 'all_data'), reads each XML under
    Annotations and moves the image its <filename> names out of JPEGImages.
    """
    print('Parsing annotation files')

    for data_path in (os.path.join(input_path, s) for s in ['all_data']):
        annot_dir = os.path.join(data_path, 'Annotations')
        jpeg_dir = os.path.join(data_path, 'JPEGImages')
        marked_dir = os.path.join(data_path, 'JPEGImages_marked')
        io_utils.mkdir(marked_dir)

        for entry in os.listdir(annot_dir):
            annot = os.path.join(annot_dir, entry)
            try:
                filename = ET.parse(annot).getroot().find('filename').text
                io_utils.move(os.path.join(jpeg_dir, filename), marked_dir)
            except Exception as e:
                print('Exception in pascal_voc_parser: {}'.format(e))
                continue
Esempio n. 24
0
def rename_image(input_path):
    """Copy-rename all_data files to 'train_20180319_<idx>.jpg' via os.rename.

    For each dataset folder (currently just 'all_data'), rebuilds a sibling
    '<dir>_rename' folder and renames every file into it, numbering from 1000.
    """
    for data_dir in (os.path.join(input_path, s) for s in ['all_data']):
        prefix = 'train'
        renamed_dir = '{}_rename'.format(data_dir)
        io_utils.delete_file_folder(renamed_dir)
        io_utils.mkdir(renamed_dir)

        for idx, entry in enumerate(os.listdir(data_dir), start=1000):
            old = os.path.join(data_dir, entry)
            new = os.path.join(renamed_dir,
                               '{}_20180319_{}.jpg'.format(prefix, idx))
            io_utils.rename(old, new)
Esempio n. 25
0
def copy_marked(data_path):
    """Copy every image referenced by an annotation into JPEGImages_marked.

    Reads each XML under <data_path>/Annotations and copies the image its
    <filename> names from JPEGImages into JPEGImages_marked.
    """
    if not os.path.isdir(data_path):
        print('input_path is not a dir: {}'.format(data_path))
        return

    annot_dir = os.path.join(data_path, 'Annotations')
    jpeg_dir = os.path.join(data_path, 'JPEGImages')
    marked_dir = os.path.join(data_path, 'JPEGImages_marked')
    io_utils.mkdir(marked_dir)

    for entry in os.listdir(annot_dir):
        annot = os.path.join(annot_dir, entry)
        try:
            filename = ET.parse(annot).getroot().find('filename').text
            io_utils.copy(os.path.join(jpeg_dir, filename), marked_dir)
        except Exception as e:
            print('Exception in pascal_voc_parser: {}'.format(e))
            continue
Esempio n. 26
0
def find_imgs_and_save_as_imgs(labels, parent_path):
    """Paste each labelled product image onto a white background and caption it.

    For every label, pastes <img_path>/<label>.jpg onto the white background
    template, draws the label plus its Chinese description (via get_chinese),
    and saves to <this_batch_imgs_path>/<label>.jpg.

    Relies on module globals this_batch_imgs_path, img_path and
    refer_166classes. `parent_path` is unused (kept for interface
    compatibility).
    """
    io_utils.mkdir(this_batch_imgs_path)
    # Raw strings: the original literals contained invalid escape sequences
    # such as '\W' and '\F' (now a DeprecationWarning); the runtime path
    # values are unchanged.
    background_path = (
        r"C:\Users\Administrator\Desktop\data_processing_carriechen"
        r"\count_all_annotations\pure_white_background.jpg"
    )
    # Font is loop-invariant; the original reloaded it every iteration.
    font = ImageFont.truetype(r'C:\Windows\Fonts\simfang.ttf', 30)
    fill_color = "black"
    for label in labels:
        # Fresh background each time, since paste() mutates it in place.
        background = Image.open(background_path)
        product, taste, weight, package = get_chinese(refer_166classes, label)
        item_img = Image.open(img_path + "\\" + label + ".jpg")
        background.paste(item_img, [100, 50])
        draw = ImageDraw.Draw(background)
        width, height = background.size
        draw.text((10, height - 100),
                  u"\"" + label + "\"",
                  font=font,
                  fill=fill_color)
        draw.text((10, height - 50),
                  u"\"" + product + taste + weight + package + "\"",
                  font=font,
                  fill=fill_color)
        background.save(this_batch_imgs_path + "\\" + label + ".jpg")
Esempio n. 27
0
import matplotlib.pyplot as plt
from pathlib import Path
from collections import defaultdict

# Checkpoints for the two models being compared: the joint
# (wpdc + shape + identification) run and the plain wpdc run.
checkpoint_fp_sia = "training_debug/logs/wpdc+shp+identification_4_V_lr05/_checkpoint_epoch_50.pth.tar"
checkpoint_fp_wpdc = "training_debug/logs/wpdc_alpha/_checkpoint_epoch_50.pth.tar"

# Validation data: image root plus the labelled / unlabelled file lists.
root = "/home/luoyao/Project_3d/3D_face_solution/3DDFA_TPAMI/3DDFA_PAMI/train_aug_120x120"
filelists_test = "./label_train_aug_120x120.list.val"
no_label_filelists_test = "./train_aug_120x120.list.val"
# One image name per line in the unlabelled list.
img_names_list = Path(no_label_filelists_test).read_text().strip().split('\n')

boxplot_save_path = 'training_debug/logs/joint-loss-boxplot-7/'

# Side effect at import time: make sure the result folder exists.
mkdir('result-no-pose/')
#mkdir(boxplot_save_path)

# Output paths for the ground-truth and predicted 62-d parameter arrays.
param_fp_gt = './train.configs/param_all_norm_val.pkl'
sia_param_62d_path = 'result-no-pose/sia_param_62d.npy'
wpdc_param_62d_path = 'result-no-pose/wpdc_param_62d.npy'


def read_line(path):
    """Read *path*, trim surrounding whitespace, and return its lines."""
    return Path(path).read_text().strip().split('\n')


# NOTE(review): scrape artifact — order_index_person's real body is missing
# and an unrelated __save cache helper (from a feature-selection class) was
# pasted in as a nested def. Recover the original bodies before using this.
def order_index_person(test_file_path):
 def __save(self, data_set, cv, method, feature_selection):
     # Persist the computed feature selection into the precomputed-data cache.
     mkdir(PreComputedData.dir_name(data_set, cv, method))
     np.save(PreComputedData.file_name(data_set, cv, method, self), feature_selection)
Esempio n. 29
0
    # NOTE(review): orphaned fragment — the enclosing `def` header was lost
    # during extraction; rows, cols, scale, max_side, img, output_path,
    # str_date and n come from it (presumably the `handle()` called at the
    # bottom of this file — confirm).
    # when images have a large aspect ratio
    largest_side = max(rows, cols)
    # Cap the scale so the scaled longest side does not exceed max_side.
    if largest_side * scale > max_side:
        scale = max_side / largest_side

    # resize the image with the computed scale
    img = cv2.resize(img, None, fx=scale, fy=scale)
    # Output name: background_YYYY-MM-DD-<n>.jpg derived from str_date.
    output_path = os.path.join(
        output_path, "background" + "_" + str_date[0:4] + "-" + str_date[4:6] +
        "-" + str_date[6:8] + "-" + str(n) + ".jpg")
    cv2.imwrite(output_path, img)  # output file name; change the pattern here
    #return img, scale


# Paths for the background-image preprocessing script.
parent_path = "test_folder/Background"
origin_path = os.path.join(parent_path, "origin")
handle_path = os.path.join(parent_path, "after")

# Date stamp (YYYYMMDD) embedded in the output file names.
str_date = '{year}{month}{day}'.format(year='2018', month='05',
                                       day='29')  # set the date here

if __name__ == '__main__':
    # Process every origin image into handle_path with a sequential id
    # starting at 10000; the output folder is emptied first.
    ncount = 10000
    io_utils.mkdir(handle_path)
    io_utils.remove_all(handle_path)
    for f in os.listdir(origin_path):
        img_path = os.path.join(origin_path, f)
        handle(img_path, 800, 1333, handle_path, ncount)  # background: 800*1333
        ncount += 1
 # NOTE(review): duplicated __save cache helper, mis-indented by the scrape
 # (1-space indent) — it will not parse where it sits; restore it to its
 # original class before use.
 def __save(self, data_set, cv, method, feature_selection):
     # Persist the computed feature selection into the precomputed-data cache.
     mkdir(PreComputedData.dir_name(data_set, cv, method))
     np.save(PreComputedData.file_name(data_set, cv, method, self),
             feature_selection)
Esempio n. 31
0
# Training configuration for the joint (wpdc + shape + identification) run.
filelists_val = "./label_train_aug_120x120.list.val"
root = "/home/luoyao/Project_3d/3D_face_solution/3DDFA_TPAMI/3DDFA_PAMI/train_aug_120x120"
pre_model_params_path = "./training_debug/logs/shp-constrain/_checkpoint_epoch_50.pth.tar"  #wpdc + shp constrain model
log_file = "./training_debug/logs/wpdc+shp+identification_4_V_lr05/"
#loss
snapshot = "./training_debug/logs/wpdc+shp+identification_4_V_lr05/"
log_mode = 'w'
resume = ''
size_average = True
num_classes = 62  # matches the 62-d parameter vectors used elsewhere in this file
frozen = 'false'
task = 'all'
test_initial = False
resample_num = 132

# Side effect at import time: make sure the snapshot folder exists.
mkdir(snapshot)


class sia_net(nn.Module):
    def __init__(self, model):
        """Wrap *model* as the backbone of the siamese network.

        Keeps all but the last two children of *model* followed by global
        average pooling (self.fc1), then adds two heads: a 2048->1024->512
        embedding branch (self.fc1_0) and a 2048->62 parameter-regression
        branch (self.fc1_1). Assumes the backbone outputs 2048 channels —
        TODO confirm for non-ResNet-50-like models.
        """
        super(sia_net, self).__init__()
        # Strip the last two layers of the backbone model.
        self.fc1 = nn.Sequential(nn.Sequential(*list(model.children())[:-2]),
                                 nn.AdaptiveAvgPool2d(1))

        #        self.relu = nn.ReLU(inplace=True)
        self.fc1_0 = nn.Sequential(nn.Linear(2048, 1024), nn.Linear(1024, 512))

        self.fc1_1 = nn.Sequential(nn.Linear(2048, 62))

    def forward_once(self, x):
Esempio n. 32
0
    # NOTE(review): orphaned fragment — the enclosing `def` header was lost
    # during extraction; fns, params and save_path are its locals/parameters.
    # Saves a dense reconstructed vertex set per image as a .mat file.
    for i in range(2000):
        fn = fns[i]
        vertex = reconstruct_vertex(params[i], dense=True)  # dense 3d face
        wfp = osp.join(save_path, fn.replace('.jpg', '.mat'))
        print(wfp)
        sio.savemat(wfp, {'vertex': vertex})


if __name__ == '__main__':
    # step1: extract params
    # Model checkpoints for the two runs being benchmarked against AFLW2000-3D.
    checkpoint_fp_sia = 'training_debug/logs/wpdc+shp+identification_4_V_lr01/_checkpoint_epoch_50.pth.tar'  # model weight value
    checkpoint_fp_wpdc = 'training_debug/logs/wpdc_alpha/_checkpoint_epoch_50.pth.tar'  # model weight value
    root = 'test.data/AFLW2000-3D_crop'
    filelists = 'test.data/AFLW2000-3D_crop.list'

    # Output locations for the extracted 62-d parameter arrays.
    mkdir('res/')
    params_sia_save_path = 'res/params_aflw2000_sia.npy'
    params_wpdc_save_path = 'res/params_aflw2000_wpdc.npy'

    # Per-model vertex output folders.
    aflw2000_sel_image = 'test.data/aflw2000_image_name.txt'
    mkdir('res/AFLW-2000-3D_vertex-sia/')
    mkdir('res/AFLW-2000-3D_vertex-wpdc/')
    sia_3d_vertex_path = 'res/AFLW-2000-3D_vertex-sia/'
    wpdc_3d_vertex_path = 'res/AFLW-2000-3D_vertex-wpdc/'

    # Bundle of inputs for the siamese model's extraction step.
    data_info_sia = {
        'checkpoint_fp': checkpoint_fp_sia,
        'root': root,
        'filelists_test': filelists,
        'params_save_path': params_sia_save_path
    }