def main():
    """Convert the WIKI/IMDB face dataset into numpy blocks on disk.

    Loads and shuffles the combined metadata, splits it into
    train/val/test sets, converts each set into an image array plus
    age/gender label arrays, and saves every array as a .npy file
    under DATA_DST.

    NOTE(review): this file defines ``main`` twice; this earlier
    definition is shadowed by the later one.  It relies on module-level
    names (fs, utils, np, gc, DATA_SRC, DATA_DST, WIKI_META_OBJ,
    IMDB_META_OBJ, TRAIN_DATA_OBJ, VAL_DATA_OBJ, TEST_DATA_OBJ,
    TRAIN_TEST_SPLIT, TRAIN_VAL_SPLIT, INPUT_DIM, age_classes,
    train_samples, val_samples, test_samples) defined elsewhere.

    Fix: typo "Sapmles" -> "Samples" in the three progress messages.
    """
    if not fs.exists(DATA_DST):
        fs.mkdir(DATA_DST)

    meta_all = utils.shuffle_meta_data(
        utils.load_meta_data(DATA_SRC, WIKI_META_OBJ, IMDB_META_OBJ))
    train, test = utils.split_meta_data(meta_all, TRAIN_TEST_SPLIT)
    train, val = utils.split_meta_data(train, TRAIN_VAL_SPLIT)

    # Free the memory held by the full metadata table.
    del meta_all
    gc.collect()

    print("Converting blocks")

    # --- train ---------------------------------------------------------
    print(" [train] %i Samples" % (train_samples))
    i = 0  # single block; kept for the _%02d filename suffix / split arg
    X_train, y_age, y_gender = utils.get_img_array(
        train, DATA_SRC, age_classes, img_dim=INPUT_DIM,
        split=i, num_samples_per_split=train_samples)
    np.save(fs.add_suffix(fs.join(DATA_DST, TRAIN_DATA_OBJ), '_%02d' % i), X_train)
    np.save(fs.add_suffix(fs.join(DATA_DST, TRAIN_DATA_OBJ), '_label_age_%02d' % i), y_age)
    np.save(fs.add_suffix(fs.join(DATA_DST, TRAIN_DATA_OBJ), '_label_gender_%02d' % i), y_gender)
    # Remove the arrays from memory before converting the next set.
    del X_train
    del y_age
    del y_gender
    gc.collect()

    # --- val -----------------------------------------------------------
    print(" [val] %i Samples" % (val_samples))
    X_val, y_age, y_gender = utils.get_img_array(
        val, DATA_SRC, age_classes, img_dim=INPUT_DIM,
        num_samples_per_split=val_samples)
    np.save(fs.join(DATA_DST, VAL_DATA_OBJ), X_val)
    np.save(fs.add_suffix(fs.join(DATA_DST, VAL_DATA_OBJ), '_label_age'), y_age)
    np.save(fs.add_suffix(fs.join(DATA_DST, VAL_DATA_OBJ), '_label_gender'), y_gender)
    # Remove the arrays from memory before converting the next set.
    del X_val
    del y_age
    del y_gender
    gc.collect()

    # --- test ----------------------------------------------------------
    print("[test] %i Samples" % (test_samples))
    i = 0
    X_test, y_age, y_gender = utils.get_img_array(
        test, DATA_SRC, age_classes, img_dim=INPUT_DIM,
        split=i, num_samples_per_split=test_samples)
    np.save(fs.add_suffix(fs.join(DATA_DST, TEST_DATA_OBJ), '_%02d' % i), X_test)
    np.save(fs.add_suffix(fs.join(DATA_DST, TEST_DATA_OBJ), '_label_age_%02d' % i), y_age)
    np.save(fs.add_suffix(fs.join(DATA_DST, TEST_DATA_OBJ), '_label_gender_%02d' % i), y_gender)
    # Remove the arrays from memory.
    del X_test
    del y_age
    del y_gender
    gc.collect()
def main():
    """Convert the WIKI/IMDB face dataset into numpy blocks on disk.

    Train and test sets are written in blocks of SAMPLES_PER_SPLIT
    samples each (files suffixed _00, _01, ...); the validation set is
    forced into a single block.

    NOTE(review): relies on module-level names (fs, utils, np, gc, math,
    DATA_SRC, DATA_DST, the *_OBJ constants, TRAIN_TEST_SPLIT,
    TRAIN_VAL_SPLIT, SAMPLES_PER_SPLIT, INPUT_DIM, age_classes) defined
    elsewhere in the file.

    Fixes: typo "Sapmles" -> "Samples" in the three progress messages;
    loop-invariant block counts computed once instead of twice.
    """
    if not fs.exists(DATA_DST):
        fs.mkdir(DATA_DST)

    meta_all = utils.shuffle_meta_data(
        utils.load_meta_data(DATA_SRC, WIKI_META_OBJ, IMDB_META_OBJ))
    train, test = utils.split_meta_data(meta_all, TRAIN_TEST_SPLIT)
    train, val = utils.split_meta_data(train, TRAIN_VAL_SPLIT)

    # Free the memory held by the full metadata table.
    del meta_all
    gc.collect()

    print("Converting blocks")

    # --- train: one block per SAMPLES_PER_SPLIT samples -----------------
    n_train_blocks = math.ceil(len(train['path']) / SAMPLES_PER_SPLIT)
    print(" [train] %i Samples. %i Blocks required"
          % (len(train['path']), n_train_blocks))
    for i in range(n_train_blocks):
        X_train, y_age, y_gender = utils.get_img_array(
            train, DATA_SRC, age_classes, img_dim=INPUT_DIM,
            split=i, num_samples_per_split=SAMPLES_PER_SPLIT)
        np.save(fs.add_suffix(fs.join(DATA_DST, TRAIN_DATA_OBJ), '_%02d' % i), X_train)
        np.save(fs.add_suffix(fs.join(DATA_DST, TRAIN_DATA_OBJ), '_label_age_%02d' % i), y_age)
        np.save(fs.add_suffix(fs.join(DATA_DST, TRAIN_DATA_OBJ), '_label_gender_%02d' % i), y_gender)
        # Remove the arrays from memory before the next block.
        del X_train
        del y_age
        del y_gender
        gc.collect()

    # --- val: always a single block -------------------------------------
    print(" [val] %i Samples. 1 Block forced" % (len(val['path'])))
    X_val, y_age, y_gender = utils.get_img_array(
        val, DATA_SRC, age_classes, img_dim=INPUT_DIM,
        num_samples_per_split=len(val['path']))
    np.save(fs.join(DATA_DST, VAL_DATA_OBJ), X_val)
    np.save(fs.add_suffix(fs.join(DATA_DST, VAL_DATA_OBJ), '_label_age'), y_age)
    np.save(fs.add_suffix(fs.join(DATA_DST, VAL_DATA_OBJ), '_label_gender'), y_gender)
    # Remove the arrays from memory before the next set.
    del X_val
    del y_age
    del y_gender
    gc.collect()

    # --- test: one block per SAMPLES_PER_SPLIT samples ------------------
    n_test_blocks = math.ceil(len(test['path']) / SAMPLES_PER_SPLIT)
    print("[test] %i Samples. %i Blocks required"
          % (len(test['path']), n_test_blocks))
    for i in range(n_test_blocks):
        X_test, y_age, y_gender = utils.get_img_array(
            test, DATA_SRC, age_classes, img_dim=INPUT_DIM,
            split=i, num_samples_per_split=SAMPLES_PER_SPLIT)
        np.save(fs.add_suffix(fs.join(DATA_DST, TEST_DATA_OBJ), '_%02d' % i), X_test)
        np.save(fs.add_suffix(fs.join(DATA_DST, TEST_DATA_OBJ), '_label_age_%02d' % i), y_age)
        np.save(fs.add_suffix(fs.join(DATA_DST, TEST_DATA_OBJ), '_label_gender_%02d' % i), y_gender)
        # Remove the arrays from memory before the next block.
        del X_test
        del y_age
        del y_gender
        gc.collect()
# Export one layer's weights as ConvnetJS-style text files.
# NOTE(review): fragment — `key`, `blobs`, `nb_filter`, `stack_size`,
# `nb_col`, `nb_row`, `WEIGHTS_DIR` and `fs` are defined outside this view
# (presumably inside a loop over the network's layers — confirm).
print("Expected Shape: ", nb_filter, stack_size, nb_col, nb_row)
print("Found Shape: ", np.array(blobs[0].data).shape)
# blobs[0] holds the layer's filter weights, blobs[1] its biases.
weights_p = blobs[0].data.astype(dtype=np.float32)
weights_b = blobs[1].data.astype(dtype=np.float32)
if len(weights_p.shape) > 2:
    # Caffe uses the shape f, (d, y, x)
    # ConvnetJS uses the shape f, (y, x, d)
    weights_p = np.swapaxes(np.swapaxes(weights_p, 3, 1), 2, 1)
    print("Converted to Shape: ", weights_p.shape)
# Flatten each filter to one row of stack_size*nb_col*nb_row values.
weights = {
    'filter': weights_p.reshape((nb_filter, stack_size*nb_col*nb_row)).tolist(),
    'bias': weights_b.tolist()
}
filename = WEIGHTS_DIR + key + '.txt'
if not fs.exists(fs.dirname(filename)):
    fs.mkdir(fs.dirname(filename))
# Truncate/create the filter file, then append one comma-separated
# row per filter; the last row gets no trailing newline.
fs.write(fs.add_suffix(filename, "_filter"), "")
for i, f_weights in enumerate(weights['filter']):
    if i == len(weights['filter']) - 1:
        fs.append(fs.add_suffix(filename, "_filter"), ",".join(map(str, f_weights)))
    else:
        fs.append(fs.add_suffix(filename, "_filter"), ",".join(map(str, f_weights)) + "\n")
# Biases go into a single comma-separated line in their own file.
fs.write(fs.add_suffix(filename, "_bias"), ",".join(map(str, weights['bias'])))
def test_add_suffix_to_filename():
    """A suffix lands between the basename and the extension."""
    result = fs.add_suffix('test.csv', "_suf")
    assert result == 'test_suf.csv'
def test_add_suffix_to_path():
    """A path without an extension simply gets the suffix appended."""
    result = fs.add_suffix('/foo/bar/test', "_suf")
    assert result == '/foo/bar/test_suf'
def test_add_suffix_to_filename_in_path():
    """Directory components are preserved; only the basename changes."""
    result = fs.add_suffix('/foo/bar/test.csv', "_suf")
    assert result == '/foo/bar/test_suf.csv'
# Export one layer's weights as raw float32 binary files.
# NOTE(review): fragment — `key`, `blobs`, `nb_filter`, `stack_size`,
# `nb_col`, `WEIGHTS_DIR` and `fs` are defined outside this view
# (presumably inside a loop over the network's layers — confirm).
nb_row = blobs[0].width
print("====> Layer: ", key)
print("Expected Shape: ", nb_filter, stack_size, nb_col, nb_row)
print("Found Shape: ", np.array(blobs[0].data).shape)
# blobs[0] holds the layer's filter weights, blobs[1] its biases.
weights_p = blobs[0].data.astype(dtype=np.float32)
weights_b = blobs[1].data.astype(dtype=np.float32)
if len(weights_p.shape) > 2:
    # Caffe uses the shape f, (d, y, x)
    # ConvnetJS uses the shape f, (y, x, d)
    weights_p = np.swapaxes(np.swapaxes(weights_p, 3, 1), 2, 1)
    print("Converted to Shape: ", weights_p.shape)
# Flatten each filter to one row of stack_size*nb_col*nb_row values.
weights = {
    'filter': weights_p.reshape((nb_filter, stack_size*nb_col*nb_row)),
    'bias': weights_b
}
filename = WEIGHTS_DIR + key + '.bin'
if not fs.exists(fs.dirname(filename)):
    fs.mkdir(fs.dirname(filename))
# Fixed: ndarray.tostring() is deprecated (removed in NumPy 2.0);
# tobytes() is the exact, long-available drop-in replacement.
with open(fs.add_suffix(filename, "_filter"), 'wb') as f:
    f.write(weights['filter'].astype(np.float32).tobytes())
with open(fs.add_suffix(filename, "_bias"), 'wb') as f:
    f.write(weights['bias'].astype(np.float32).tobytes())
# Export one layer's weights as raw float32 binary files, reshaping the
# flat Caffe blob to (f, d, y, x) first.
# NOTE(review): fragment — `key`, `blobs`, `nb_filter`, `stack_size`,
# `nb_col`, `nb_row`, `WEIGHTS_DIR` and `fs` are defined outside this view
# (presumably inside a loop over the network's layers — confirm).
print("====> Layer: ", key)
print("Expected Shape: ", nb_filter, stack_size, nb_col, nb_row)
print("Found Shape: ", np.array(blobs[0].data).shape)
# blobs[0] holds the layer's filter weights, blobs[1] its biases.
weights_p = blobs[0].data.reshape(
    (nb_filter, stack_size, nb_col, nb_row)).astype(dtype=np.float32)
weights_b = blobs[1].data.astype(dtype=np.float32)
if len(weights_p.shape) > 2:
    # Caffe uses the shape f, (d, y, x)
    # ConvnetJS uses the shape f, (y, x, d)
    weights_p = np.swapaxes(np.swapaxes(weights_p, 3, 1), 2, 1)
    print("Converted to Shape: ", weights_p.shape)
# Flatten each filter to one row of stack_size*nb_col*nb_row values.
weights = {
    'filter': weights_p.reshape((nb_filter, stack_size * nb_col * nb_row)),
    'bias': weights_b
}
filename = WEIGHTS_DIR + key + '.bin'
# Unused here; possibly consumed after this fragment — kept. TODO confirm.
prev_shape = (nb_filter, stack_size, nb_col, nb_row)
if not fs.exists(fs.dirname(filename)):
    fs.mkdir(fs.dirname(filename))
# Fixed: ndarray.tostring() is deprecated (removed in NumPy 2.0);
# tobytes() is the exact, long-available drop-in replacement.
with open(fs.add_suffix(filename, "_filter"), 'wb') as f:
    f.write(weights['filter'].astype(np.float32).tobytes())
with open(fs.add_suffix(filename, "_bias"), 'wb') as f:
    f.write(weights['bias'].astype(np.float32).tobytes())