Example No. 1
    def get_transforms(self):
        normalize = Normalize(mean=[0.485, 0.456, 0.406],
                              std=[0.229, 0.224, 0.225])

        inference_transforms = Compose(
            [Resize(RSIZE),
             CenterCrop(RCROP), ToNumpy(), normalize])
        return inference_transforms
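For reference, here is a self-contained sketch of the same inference pipeline built only from stock torchvision transforms. ToNumpy is swapped for ToTensor so that Normalize receives a tensor, and the RSIZE and RCROP values are assumptions, since neither constant is defined in the excerpt.

from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize

RSIZE, RCROP = 256, 224                      # assumed values, not given above

pipeline = Compose([
    Resize(RSIZE),                           # shorter side -> RSIZE
    CenterCrop(RCROP),                       # RCROP x RCROP center patch
    ToTensor(),                              # PIL image -> float tensor in [0, 1]
    Normalize(mean=[0.485, 0.456, 0.406],    # ImageNet statistics, as above
              std=[0.229, 0.224, 0.225]),
])

img = Image.open("sample.jpg").convert("RGB")   # hypothetical input file
x = pipeline(img)                               # tensor of shape (3, RCROP, RCROP)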
Example No. 2
 def load_dataset(self):
     self.dataset = self.dataset_class(txt_file='testing.txt',
                                       root_dir=self.root_dir,
                                       transform=transforms.Compose([
                                           Resize((64, 64)),
                                           ToTensor()
                                       ])
                                       )
     self.dataloader = DataLoader(self.dataset, batch_size=self.batch_size,
                                  shuffle=self.is_shuffle, num_workers=self.num_workers)
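As a rough illustration of what the loader built above yields, the stand-in below batches fake 64x64 tensors through a DataLoader; the actual batch structure depends on what self.dataset_class returns for each sample.

import torch
from torch.utils.data import DataLoader, TensorDataset

# Stand-in data: eight fake 3x64x64 images with binary labels, mimicking the
# shape produced by Resize((64, 64)) + ToTensor() above. Purely illustrative.
images = torch.rand(8, 3, 64, 64)
labels = torch.randint(0, 2, (8,))
loader = DataLoader(TensorDataset(images, labels), batch_size=4, shuffle=True)

for batch_images, batch_labels in loader:
    print(batch_images.shape, batch_labels.shape)   # torch.Size([4, 3, 64, 64]) torch.Size([4])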
Example No. 3
 def set_transformers(self):
     self.transforms = {
         self.augmentation_name[0]:
         transforms.Compose(
             [ToPILImage(),
              ToTensor(),
              Resize(self.resize),
              OrigPad()]),
         self.augmentation_name[1]:
         transforms.Compose([
             ToPILImage(),
              # Choose one transform from transforms_list at random
             transforms.RandomChoice(self.randomchoice['choice1']),
             ToTensor(),
             Resize(self.resize),
             OrigPad()
         ]),
         self.augmentation_name[2]:
         transforms.Compose([
             ToPILImage(),
             transforms.RandomChoice(self.randomchoice['choice2']),
             ToTensor(),
             Resize(self.resize),
             OrigPad()
         ]),
         self.augmentation_name[3]:
         transforms.Compose([
             ToPILImage(),
             transforms.RandomChoice(self.randomchoice['choice3']),
             ToTensor(),
             Resize(self.resize),
             OrigPad()
         ]),
         self.augmentation_name[4]:
         transforms.Compose([
             ToPILImage(),
             transforms.RandomChoice(self.randomchoice['choice4']),
             ToTensor(),
             Resize(self.resize),
             OrigPad()
         ])
     }
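The distinctive piece in this example is transforms.RandomChoice, which applies exactly one transform picked at random from a list on each call. Below is a minimal stand-alone illustration using only stock torchvision transforms; OrigPad and the tensor-aware Resize in the snippet are project-specific classes and are left out, and the augmentations listed here are placeholders rather than the actual contents of self.randomchoice.

from PIL import Image
from torchvision import transforms

augment = transforms.Compose([
    transforms.RandomChoice([                      # one of these is chosen per call
        transforms.ColorJitter(brightness=0.3),
        transforms.RandomHorizontalFlip(p=1.0),
        transforms.RandomRotation(15),
    ]),
    transforms.Resize((128, 128)),
    transforms.ToTensor(),
])

img = Image.new("RGB", (200, 200))                 # dummy image for illustration
x = augment(img)                                   # tensor of shape (3, 128, 128)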
Example No. 4
print(args)

# Dataset and Dataloader
# Dataset Read_in Part
root_dir = "/data1/yinzi/datas"
parts_root_dir = "/home/yinzi/data3/recroped_parts"

txt_file_names = {
    'train': "exemplars.txt",
    'val': "tuning.txt",
    'test': "testing.txt"
}

transforms_list = {
    'train': transforms.Compose([ToTensor(),
                                 Resize((128, 128)),
                                 OrigPad()]),
    'val': transforms.Compose([ToTensor(),
                               Resize((128, 128)),
                               OrigPad()]),
    'test': transforms.Compose([ToTensor(),
                                Resize((128, 128)),
                                OrigPad()])
}

# DataLoader
Dataset = {
    x: HelenDataset(txt_file=txt_file_names[x],
                    root_dir=root_dir,
                    parts_root_dir=parts_root_dir,
                    transform=transforms_list[x])
    for x in ['train', 'val', 'test']
}
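The excerpt stops before any loaders are created; a typical continuation, following the same pattern as Example No. 6 below, would wrap each split in a DataLoader roughly as follows (the batch size and worker count are assumed values, since the real script presumably reads them from args).

from torch.utils.data import DataLoader

Dataloader = {
    x: DataLoader(Dataset[x],
                  batch_size=16,            # assumed value
                  shuffle=(x == 'train'),   # assumed: shuffle only the training split
                  num_workers=4)            # assumed value
    for x in ['train', 'val', 'test']
}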
Example No. 5
    def segments(self, pic):
        # break up image into 9 sub pictures 
        numpy_pic = numpy.asarray(pic)
        height, width, _ = numpy_pic.shape
        section_height, section_width = height // self.sections, width // self.sections  # integer division, as in the original Python 2 code
 
        current_bottom, current_right = 0, 0
        outers, centers = [], []
        print('height', height)
        print('width', width)
        while current_bottom <= height:
            while current_right <= width:
                # get the row
                crop = Resize.crop(pic, current_right, current_bottom, section_width*2, section_height*2)
	 
                dist_from_current_right_to_edge = width - ((2*section_width) + current_right)
                dist_from_current_bottom_to_edge = height - ((2*section_height) + current_bottom)
                #pdb.set_trace()
 
                dist_from_current_left_to_edge = current_right
                dist_from_current_top_to_edge = current_bottom
 
                print("dist_from_current_left_to_edge", dist_from_current_left_to_edge)
                print("dist_from_current_right_to_edge", dist_from_current_right_to_edge)
                if (self.about_equal(dist_from_current_left_to_edge,
                                     dist_from_current_right_to_edge,
                                     section_width)
                        and self.about_equal(dist_from_current_top_to_edge,
                                             dist_from_current_bottom_to_edge,
                                             section_height)):
                    centers.append(crop)
                else:
                    outers.append(crop)
                current_right += section_width
                print(current_right, current_bottom)
            else:
                # the inner loop finished scanning this row; reset to the left edge
                current_right = 0

            current_bottom += section_height

        return centers, outers
       
    def all_segments(self, pics):
        all_centers, all_outers = [], []
        for pic in pics:
            centers, outers = self.segments(pic)
            all_centers.append(centers)
            all_outers.append(outers)
        return all_centers, all_outers

    def train(self, pics):
        # label every center crop 1 (face) and every outer crop 0 (non-face)
        faces, non_faces = self.all_segments(pics)
        targets = []
        for f in faces:
            targets.append(1)
        for n in non_faces:
            targets.append(0)
        data = faces + non_faces

        model = self.model.fit(data, targets)
        self.model = model

    def find_face(self, pic):
        segments = self.segments(pic)
        preds = []
        for s in segments:
            preds.append(self.model.predict(s))

        highest = sorted(preds)[-1]
        index = preds.index(highest)
        return segments[index]
Example No. 6
                    help="eval_per_epoch ")
args = parser.parse_args()
print(args)

# Dataset Read_in Part
img_root_dir = "/data1/yinzi/datas"
# root_dir = '/home/yinzi/Downloads/datas'
part_root_dir = "/data1/yinzi/facial_parts"
root_dir = {'image': img_root_dir, 'parts': part_root_dir}
txt_file_names = {'train': "exemplars.txt", 'val': "tuning.txt"}

twostage_Dataset = {
    x: TwoStepData(txt_file=txt_file_names[x],
                   root_dir=root_dir,
                   transform=transforms.Compose(
                       [Resize((64, 64)),
                        ToTensor(), Normalize()]))
    for x in ['train', 'val']
}

two_dataloader = {
    x: DataLoader(twostage_Dataset[x],
                  batch_size=args.batch_size,
                  shuffle=True,
                  num_workers=16)
    for x in ['train', 'val']
}


class TrainModel(TemplateModel):
    def __init__(self):