Example 1
    def load_data(self, indices=None, do_random_crop=False, do_random_flip=False, save_images=False):
        if indices is None:
            indices = range(self.nrof_samples)
        nrof_samples = len(indices)
        images = np.zeros((nrof_samples, self.out_image_size, self.out_image_size, 3))
        labels = np.zeros((nrof_samples, len(Constants.get_emotion_cols())))
        for out_idx, sample_idx in enumerate(indices):
            # Getting images
            img_name = self.dataset.iloc[sample_idx]['img_name']
            inp_dataset = self.dataset.iloc[sample_idx]['dataset']
            img_arr = np.array([int(i) for i in self.dataset.iloc[sample_idx]['pixels'].split(' ')])
            img_arr = np.reshape(img_arr, (self.in_image_size, self.in_image_size))
            if img_arr.ndim == 2:
                img_arr = self._to_rgb(img_arr)
            img_arr = self._crop(img_arr, do_random_crop, self.in_image_size)
            img_arr = self._flip(img_arr, do_random_flip)
            img = Image.fromarray(img_arr, 'RGB')
            resized_img = img.resize((self.out_image_size, self.out_image_size))
            if save_images:
                self._save_image(inp_dataset, img_name, resized_img)
            images[out_idx, :, :, :] = resized_img

            # Getting labels
            inp_labels = self.dataset.iloc[sample_idx][Constants.get_emotion_cols()]
            n_annotations = sum(inp_labels)
            probs_labels = np.array(inp_labels, dtype=float) / n_annotations
            labels[out_idx, :] = probs_labels

        # Getting previously processed embeddings
        if self.import_embedding:
            embeddings = self.embeddings[indices]
        else:
            embeddings = None
        return images, labels, embeddings
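
A hypothetical way to drive load_data in mini-batches (the CSV path, batch size, and loop are assumptions made purely for illustration; load_data and shuffled_train_indices come from Examples 1 and 6):

# Sketch only: assumes a FERPlus-style loader as constructed in Example 6.
loader = FERPlus('data/fer2013new.csv', import_embedding=False)  # hypothetical path
batch_size = 64
for start in range(0, len(loader.shuffled_train_indices), batch_size):
    batch_indices = loader.shuffled_train_indices[start:start + batch_size]
    images, labels, embeddings = loader.load_data(
        batch_indices, do_random_crop=True, do_random_flip=True)
    # images: (batch, out_image_size, out_image_size, 3) array
    # labels: (batch, n_emotions) per-image annotation probabilities
    # embeddings: None here, since import_embedding=False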
Example 2
def initialization():
    """ Loads the constants, calculates the volume and number of walkers"""
    constants = Constants()
    constants.volume = total_number_of_segments(constants.dimension,
                                                constants.radius)
    constants.number_of_walkers = int(constants.concentration *
                                      constants.volume)
    b_tau = []
    radii = [10]  #[5,6,7, 8]
    diffusion_constants = [0.02, 0.05, 0.1, 0.5, 1]
    return constants, b_tau, radii, diffusion_constants
Example 3
 def _calc_metrics(series):
     n_annotations = sum(series[Constants.get_emotion_cols()])
     if n_annotations == 0:
         series['entropy'] = np.nan
         series['disagreement_p'] = np.nan
         return series
     # count -> probabilities.
     probs = list(series[Constants.get_emotion_cols()]*1.0/n_annotations)
     series['entropy'] = scipy.stats.entropy(probs)
     series['disagreement_p'] = 1.0 - sum([p*p for p in probs])  # 1 - \sum p^2
     series['n_annotations'] = n_annotations
     series['emotion_corrected_label'] = Constants.correct_emotion_label(series['emotion'])
     return series
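
As a quick sanity check of the two metrics above, here is a worked toy case (the annotation counts are made up for illustration): four annotators split 2/1/1 over three of four classes.

import numpy as np
import scipy.stats

counts = np.array([2, 1, 1, 0])            # hypothetical annotation counts
probs = counts / counts.sum()              # [0.5, 0.25, 0.25, 0.0]
entropy = scipy.stats.entropy(probs)       # ~1.04 nats
disagreement_p = 1.0 - np.sum(probs ** 2)  # 1 - 0.375 = 0.625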
Example 4
 def _preprocess(df):
     df = df.apply(FERPlus._calc_metrics, axis=1)
     df = df.dropna(subset=['img_name', 'entropy', 'disagreement_p'])
     df['emotion_corrected_label'] = df['emotion_corrected_label'].astype(int)
     df = df[['emotion', 'pixels', 'dataset', 'img_name'] + Constants.get_label_cols() +
             ['emotion_corrected_label', 'entropy', 'disagreement_p', 'n_annotations']]
     return df
Example 5
    def download_image(url, bounding_box, raw_filename, processed_filename):
        # Downloading images
        try:
            request = urllib.request.urlopen(url, timeout=5)
            with open(raw_filename, 'wb') as f:
                f.write(request.read())
        except Exception as e:
            print(f'{e}. Error downloading image {url}. Skipping download.')
            return

        # Processing downloaded image
        try:
            img = Image.open(raw_filename)
            img_width, img_height = img.size
            cropped_img = img.crop(
                bounding_box.get_area(width=img_width, height=img_height))
            img_size = Constants.get_output_image_size()
            resized_img = cropped_img.resize((img_size, img_size))
            resized_img.save(processed_filename)
        except Exception as e:
            print(f'{e}. Error reading image {raw_filename}. Skipping image processing.')
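
A hypothetical call sketch. The URL, file names, and the FractionalBox stand-in are assumptions, and it treats download_image as directly callable; the only interface taken from the snippet above is get_area(width=..., height=...) returning a PIL-style (left, upper, right, lower) box.

class FractionalBox:
    """Stand-in bounding box expressed as fractions of the image size (assumed)."""
    def __init__(self, left, top, right, bottom):
        self.left, self.top, self.right, self.bottom = left, top, right, bottom

    def get_area(self, width, height):
        # Convert fractional coordinates to pixel coordinates for PIL's crop().
        return (int(self.left * width), int(self.top * height),
                int(self.right * width), int(self.bottom * height))

download_image(
    url='https://example.com/face.jpg',                # placeholder URL
    bounding_box=FractionalBox(0.25, 0.10, 0.75, 0.60),
    raw_filename='raw/face.jpg',
    processed_filename='processed/face.jpg')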
Example 6
    def __init__(self, file_path, in_image_size=48, out_image_size=160, import_embedding=True,
                 embedding_model='VGGFace2_Inception_ResNet_v1', embedding_layer='Mixed_5a', seed=666):
        self.seed = seed
        np.random.seed(self.seed)
        self.root_dir = file_path[:file_path.rfind('/') + 1]
        self.subset = file_path[file_path.rfind('/') + 1:-4]
        self.import_embedding = import_embedding
        if self.import_embedding:
            embedding_dir = os.path.join(self.root_dir, 'embedding', self.subset)
            embedding_file = os.path.join(embedding_dir, f'embeddings_{embedding_model}_{embedding_layer}.pkl')
            with open(embedding_file, 'rb') as f:
                self.embeddings = pickle.load(f)
            self.embedding_size = np.shape(self.embeddings)[1]
        else:
            self.embeddings = None
            self.embedding_size = None
        self.dataset = pd.read_csv(file_path)
        self.train = self.dataset[self.dataset['dataset'] == 'Training']
        self.valid = self.dataset[self.dataset['dataset'] == 'PrivateTest']
        self.test = self.dataset[self.dataset['dataset'] == 'PublicTest']
        self.in_image_size = in_image_size
        self.out_image_size = out_image_size
        self.nrof_samples = len(self.dataset)
        self.shuffled_train_indices = self.train.index.to_list()
        np.random.shuffle(self.shuffled_train_indices)

        # Saving numpy arrays for images and labels
        targets = np.array(self.dataset['emotion_corrected_label'])
        self.single_labels = np.eye(Constants.get_no_emotions())[targets]
        self.labels = np.zeros((self.nrof_samples, Constants.get_no_emotions()))
        for idx in range(self.nrof_samples):
            if idx % 100 == 0:
                print(f'\033[1A\033[KConverting row {idx} of dataframe to numpy array.')
            # Getting labels
            inp_labels = self.dataset.iloc[idx][Constants.get_emotion_cols()]
            n_annotations = sum(inp_labels)
            probs_labels = np.array(inp_labels, dtype=float) / n_annotations
            self.labels[idx, :] = probs_labels
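
For reference, the np.eye(...)[targets] indexing used for single_labels turns integer class ids into one-hot rows (five classes are assumed here purely for illustration; the real count comes from Constants.get_no_emotions()):

import numpy as np

targets = np.array([0, 3, 1])
one_hot = np.eye(5)[targets]
# array([[1., 0., 0., 0., 0.],
#        [0., 0., 0., 1., 0.],
#        [0., 1., 0., 0., 0.]])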
Example 7
 def __init__(self, file_path, seed=666):
     self.seed = seed
     np.random.seed(self.seed)
     self.file_path = file_path
     cols = []
     for i in range(3):
         cols += [
             f'img_{i}', f'top_left_column_{i}', f'bottom_right_column_{i}',
             f'top_left_row_{i}', f'bottom_right_row_{i}'
         ]
     cols += ['triplet_type']
     for i in range(Constants.get_max_FEC_annotations()):
         cols += [f'id_{i}', f'label_{i}']
     self.dataset = pd.read_csv(file_path,
                                header=None,
                                names=cols,
                                engine='python')
     self.root_dir = file_path[:file_path.rfind('/') + 1]
     if 'train' in file_path:
         self.images_dir = self.root_dir + 'images/train/'
     elif 'test' in file_path:
         self.images_dir = self.root_dir + 'images/test/'
Example 8
 def get_num_classes():
     return len(Constants.get_emotion_cols())