def preprocess(train=None, data=None):
    """Build pose-estimation transformation parameters and transform one sample.

    Parameters:
        train: unused here; kept for interface compatibility with callers.
        data: raw sample handed straight to ``DataTransformer.transform``.

    Returns:
        ``(data_img, mask_img, label)`` as produced by
        ``DataTransformer.transform``.
    """
    params = TransformationParameter()
    params.stride = 8
    params.crop_size_x = 368
    params.crop_size_y = 368
    params.target_dist = 0.6
    params.scale_prob = 1
    params.scale_min = 0.5
    params.scale_max = 1.1
    params.max_rotate_degree = 40
    params.center_perterb_max = 40
    params.do_clahe = False
    params.num_parts_in_annot = 17
    params.num_parts = 56
    params.mirror = True

    dataTransformer = DataTransformer(params)

    # Derived label-buffer layout. These are element *counts*, so use floor
    # division (`/` would produce floats under Python 3).  Renamed the old
    # local `np`, which shadowed the conventional numpy alias.
    num_label_channels = 2 * (params.num_parts + 1)
    stride = params.stride
    grid_x = params.crop_size_x // stride
    grid_y = params.crop_size_y // stride
    channelOffset = grid_y * grid_x
    vec_channels = 38
    heat_channels = 19
    ch = vec_channels + heat_channels
    start_label_data = (params.num_parts + 1) * channelOffset
    transformed_data = []   # size: params.crop_size_x * params.crop_size_y * 3
    transformed_label = []  # size: grid_x * grid_y * num_label_channels

    # Transformation
    print("Transforming...")
    data_img, mask_img, label = dataTransformer.transform(data)
    return data_img, mask_img, label
def testTransform(self):
    """Transforming a raw dataset record must yield the expected dict."""
    transformer = DataTransformer(mapping=test_data.transformer_mapping)
    result = transformer.transform(test_data.data_from_dataset)
    # Keep the transformed payload visible to ease debugging on failure.
    print(result)
    self.assertDictEqual(result, test_data.transformed_data)
def run(
        self,
        x,
        y,
        labels,
        figname='',
        figsize=(15, 5),
        bands=3,
        colors=("#8BBCD4", "#2B7ABD", "#0050A0", "#EF9483", "#E02421",
                "#A90E0A")
):  # dark blue, medium blue, light blue, dark red, medium red, light red
    """
    Return the entire graph and its plt object
    Look at DataTransformer.transform to see how the data is transformed.

    Keyword arguments:
    x: single array with x values. Distance between neighboring entries
       have to be the same
    y: two-dimensional array with y values for each entry.
    labels: array with strings, shown as the labels on the y-axis.
    figsize: (a,b) used when creating the figure (optional)
    bands: default is 3
    colors: array with the colors used for the bands. from dark to light
            blue, then from dark red to light red.

    Requirements:
    len(y[i]) == len(x) for all 0 <= i < len(y)
    len(y[0]) == len(labels)
    len(colors) == 2*bands

    RETURN: plt object
    """
    self.check_valid_params(x, y, labels, figsize, bands, colors)
    n = len(y)
    F = self.create_figure(figname, figsize)
    df = DataTransformer(y, bands)
    for i in range(n):
        ax = F.add_subplot(n, 1, i + 1)
        # Bind the transformed band arrays to a fresh name (`ybands`) so the
        # `bands` *count* parameter is not clobbered inside the loop — same
        # convention as the numpy variant of this method.
        transformed_x, ybands = df.transform(y[i], x)
        for idx, band in enumerate(ybands):
            ax.fill_between(transformed_x[idx], 0, band, color=colors[idx])
        self.adjust_visuals_line(x, df, ax, i, labels)
    return plt
def preprocessing(train=None):
    """Prepare COCO keypoint transformation parameters and transform one image.

    Parameters:
        train: truthy selects the train2017 annotation file, otherwise
               val2017 is used.

    Returns:
        ``(data_img, mask_img, label)`` from ``DataTransformer.transform``.

    NOTE(review): ``filename`` is not defined inside this function — it must
    come from module scope, or the final ``transform`` call raises
    ``NameError``.  Confirm against the rest of the module / callers.
    """
    params = TransformationParameter()
    params.stride = 8
    params.crop_size_x = 368
    params.crop_size_y = 368
    params.target_dist = 0.6
    params.scale_prob = 1
    params.scale_min = 0.5
    params.scale_max = 1.1
    params.max_rotate_degree = 40
    params.center_perterb_max = 40
    params.do_clahe = False
    params.num_parts_in_annot = 17
    params.num_parts = 56
    params.mirror = True

    dataTransformer = DataTransformer(params)
    # dataTransformer.initRand()

    # Derived label-buffer layout.  Element counts, so use floor division
    # (`/` would give floats under Python 3).  Renamed the old local `np`,
    # which shadowed the conventional numpy alias.
    num_label_channels = 2 * (params.num_parts + 1)
    stride = params.stride
    grid_x = params.crop_size_x // stride
    grid_y = params.crop_size_y // stride
    channelOffset = grid_y * grid_x
    vec_channels = 38
    heat_channels = 19
    ch = vec_channels + heat_channels
    start_label_data = (params.num_parts + 1) * channelOffset
    transformed_data = []   # size: params.crop_size_x * params.crop_size_y * 3
    transformed_label = []  # size: grid_x * grid_y * num_label_channels

    # Dataset: pick the annotation file matching the requested split.
    dataset_dir = os.path.abspath(
        os.path.join(os.path.dirname(__file__), '..', 'dataset'))
    if train:
        anno_path = os.path.join(
            dataset_dir, "annotations/person_keypoints_train2017.json")
    else:
        anno_path = os.path.join(
            dataset_dir, "annotations/person_keypoints_val2017.json")

    # Transformation
    data_img, mask_img, label = dataTransformer.transform(filename, anno_path)
    return data_img, mask_img, label
def run(
        self,
        x,
        y,
        labels,
        figsize=(20, 3),
        bands=3,
        colors=("#8BBCD4", "#2B7ABD", "#0050A0", "#EF9483", "#E02421",
                "#A90E0A")
):  # dark blue, medium blue, light blue, dark red, medium red, light red
    """
    Return the entire graph and its plt object
    Look at DataTransformer.transform to see how the data is transformed.

    Keyword arguments:
    x: single array with x values. Distance between neighboring entries
       have to be the same
    y: two-dimensional array with y values for each entry.
    labels: array with strings, shown as the labels on the y-axis.
    figsize: (a,b) used when creating the figure (optional)
    bands: default is 3
    colors: array with the colors used for the bands. from dark to light
            blue, then from dark red to light red.

    Requirements:
    len(y[i]) == len(x) for all 0 <= i < len(y)
    len(y[0]) == len(labels)
    len(colors) == 2*bands

    RETURN: plt object
    """
    self.check_valid_params(x, y, labels, figsize, bands, colors)
    n = len(y)
    F = self.create_figure(figsize)
    df = DataTransformer(y, bands)
    for i in range(n):
        ax = F.add_subplot(n, 1, i + 1)
        # Bind the transformed band arrays to a fresh name (`ybands`) so the
        # `bands` *count* parameter is not clobbered inside the loop — same
        # convention as the numpy variant of this method.
        transformed_x, ybands = df.transform(y[i], x)
        for idx, band in enumerate(ybands):
            ax.fill_between(transformed_x[idx], 0, band, color=colors[idx])
        self.adjust_visuals_line(x, df, ax, i, labels)
    return plt
class DataTransformerTest(unittest.TestCase):
    """Unit tests for DataTransformer band splitting.

    Fixes applied: the Python 2 ``print a`` statements (syntax errors under
    Python 3) are now ``print(...)`` calls; ``assertTrue(a == b)`` became
    ``assertEqual`` for readable failure diffs; the mutable default argument
    on ``run_me`` was replaced with the ``None`` sentinel idiom.
    """

    def setUp(self):
        # Transformer under test: a single series spanning [-9, 9], 3 bands.
        self.d = DataTransformer([[9, -9, 0]], 3)
        # Expected x output when no zero-crossing points are inserted:
        # one copy of the input x per band (3 positive + 3 negative bands).
        # Built per-row so the sublists are distinct objects.
        self.common_x_ret = [[1, 2, 3] for _ in range(6)]

    # -- mixed positive and negative values ---------------------------------

    def test_top_range_mixed_negative_positives(self):
        self.run_me(
            [4, 1, -9],
            [[1, 2, 2.5, 3] for _ in range(6)],
            [[3, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 0],
             [0, 0, 0, 3], [0, 0, 0, 3], [0, 0, 0, 3]])

    def test_positive_zero_negative_positive(self):
        self.run_me(
            [4, 0, -9],
            self.common_x_ret,
            [[3, 0, 0], [1, 0, 0], [0, 0, 0],
             [0, 0, 3], [0, 0, 3], [0, 0, 3]])

    def test_mixed_zero_at_end(self):
        self.run_me(
            [4, -1, 0],
            [[1, 1.5, 2, 3] for _ in range(6)],
            [[3, 0, 0, 0], [1, 0, 0, 0], [0, 0, 0, 0],
             [0, 0, 1, 0], [0, 0, 0, 0], [0, 0, 0, 0]])

    def test_positive_negative_positive(self):
        self.run_me(
            [4, 1, -9],
            [[1, 2, 2.5, 3] for _ in range(6)],
            [[3, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 0],
             [0, 0, 0, 3], [0, 0, 0, 3], [0, 0, 0, 3]])

    def test_positive_zero_negative(self):
        self.run_me(
            [4, 1, -9, 3],
            [[1, 2, 2.5, 3, 3.5, 4] for _ in range(6)],
            [[3, 1, 0, 0, 0, 3], [1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0],
             [0, 0, 0, 3, 0, 0], [0, 0, 0, 3, 0, 0], [0, 0, 0, 3, 0, 0]],
            x_data=[1, 2, 3, 4])

    # -- only positive OR negative values -----------------------------------

    def test_positive_bottom_edge(self):
        self.run_me(
            [4.5, 1, 3],
            self.common_x_ret,
            [[3, 1, 3], [1.5, 0, 0], [0, 0, 0],
             [0, 0, 0], [0, 0, 0], [0, 0, 0]])

    def test_zero_at_beginning(self):
        self.run_me(
            [0, 3, 0],
            self.common_x_ret,
            [[0, 3, 0], [0, 0, 0], [0, 0, 0],
             [0, 0, 0], [0, 0, 0], [0, 0, 0]])

    def test_zero_at_end(self):
        self.run_me(
            [4, 3, 0],
            self.common_x_ret,
            [[3, 3, 0], [1, 0, 0], [0, 0, 0],
             [0, 0, 0], [0, 0, 0], [0, 0, 0]])

    def test_positive_zero_positive(self):
        self.run_me(
            [4, 0, 1],
            self.common_x_ret,
            [[3, 0, 1], [1, 0, 0], [0, 0, 0],
             [0, 0, 0], [0, 0, 0], [0, 0, 0]])

    def test_top_range_no_negative(self):
        self.run_me(
            [4, 1, 9],
            self.common_x_ret,
            [[3, 1, 3], [1, 0, 3], [0, 0, 3],
             [0, 0, 0], [0, 0, 0], [0, 0, 0]])

    def test_edgecases_medium(self):
        self.assertEqual(
            self.d.transform([2, 5.99, 3.81], [1, 2, 3]),
            (self.common_x_ret,
             [[2, 3, 3], [0, 2.99, 0.81], [0, 0, 0],
              [0, 0, 0], [0, 0, 0], [0, 0, 0]]))

    def test_choice(self):
        self.assertEqual(
            self.d.transform([4, 1, 8], [1, 2, 3]),
            (self.common_x_ret,
             [[3, 1, 3], [1, 0, 3], [0, 0, 2],
              [0, 0, 0], [0, 0, 0], [0, 0, 0]]))

    def test_edge_top_negative_value(self):
        self.assertEqual(
            self.d.transform([0, -7.5, -9], [1, 2, 3]),
            (self.common_x_ret,
             [[0, 0, 0], [0, 0, 0], [0, 0, 0],
              [0, 3, 3], [0, 3, 3], [0, 1.5, 3]]))

    def test_edge_middle_negative_value(self):
        self.assertEqual(
            self.d.transform([-3.5, -6, -4], [1, 2, 3]),
            (self.common_x_ret,
             [[0, 0, 0], [0, 0, 0], [0, 0, 0],
              [3, 3, 3], [0.5, 3, 1], [0, 0, 0]]))

    def test_edge_bottom_negative_value(self):
        self.run_me(
            [-0.5, -3, -8],
            self.common_x_ret,
            [[0, 0, 0], [0, 0, 0], [0, 0, 0],
             [0.5, 3, 3], [0, 0, 3], [0, 0, 2]])

    def test_regular_bottom_negative_value(self):
        self.run_me(
            [0, -2, -1],
            self.common_x_ret,
            [[0, 0, 0], [0, 0, 0], [0, 0, 0],
             [0, 2, 1], [0, 0, 0], [0, 0, 0]])

    def run_me(self, data, x, y, x_data=None):
        """Transform `data` over `x_data` and compare against expected x/y."""
        # `None` sentinel instead of a mutable default argument.
        if x_data is None:
            x_data = [1, 2, 3]
        a, b = self.d.transform(data, x_data)
        print(a)
        print(b)
        self.assertEqual(a, x)
        self.assertEqual(b, y)
def run(
        self,
        x,
        y,
        labels,
        figsize=(15, 20),
        bands=3,
        colors=("#8BBCD4", "#2B7ABD", "#0050A0", "#EF9483", "#E02421",
                "#A90E0A")
):  # dark blue, medium blue, light blue, dark red, medium red, light red
    """
    Return the entire graph and its plt object
    Look at DataTransformer.transform to see how the data is transformed.

    Keyword arguments:
    x: single array with x values. Distance between neighboring entries
       have to be the same
    y: two-dimensional array with y values for each entry.
    labels: array with strings, shown as the labels on the y-axis.
    figsize: (a,b) used when creating the figure (optional)
    bands: default is 3
    colors: array with the colors used for the bands. from dark to light
            blue, then from dark red to light red.

    Requirements:
    len(y[i]) == len(x) for all 0 <= i < len(y)
    len(y[0]) == len(labels)
    len(colors) == 2*bands

    RETURN: plt object
    """
    self.check_valid_params(x, y, labels, figsize, bands, colors)

    # One stacked subplot per data column, sharing both axes.
    num_series = len(y[0, :])
    fig, axes = plt.subplots(num_series, 1, figsize=figsize,
                             sharex=True, sharey=True)
    transformer = DataTransformer(y, bands)

    for col, ax in enumerate(axes.flatten()):
        xs_per_band, ybands = transformer.transform(y[:, col], x)
        for level, band_values in enumerate(ybands):
            ax.fill_between(xs_per_band[level], 0, band_values,
                            color=colors[level])
        self.adjust_visuals_line(x, transformer, ax, col, num_series, labels)

    # Shared axis captions for the whole figure.
    fig.text(0.5, 0.04, 'Time', ha='center', size=30)
    fig.text(0.04, 0.5, 'Error to observation ratio',
             va='center', rotation='vertical', size=30)

    # Legend swatches run dark-red .. light-red, then light-blue .. dark-blue.
    legend_colors = ["#A90E0A", "#E02421", "#EF9483",
                     "#8BBCD4", "#2B7ABD", "#0050A0"]
    handles = [self.patch_creator(c) for c in legend_colors]

    # Label each band with its value range derived from the data extremes.
    bandwidths = int(transformer.max) / bands
    lowerbounds = np.arange(int(transformer.min), int(transformer.max),
                            bandwidths)
    labels = [f"{int(b)} - {int(b + bandwidths)}" for b in lowerbounds]
    fig.legend(handles, labels, ncol=bands * 2,
               loc='upper center', fontsize='xx-large')
    return plt