Code Example #1
def export_neglected_file_list(monitoring_dir, ROOT_DIR, LOG_DIR, backup_file_list):
    # Build a dated log file name such as 20240131_neglected_files.log
    today_str = datetime.datetime.today().strftime('%Y%m%d')
    export_name = today_str + "_neglected_files.log"
    export_path = fs.join([ROOT_DIR, LOG_DIR, export_name])
    if not fs.exists(fs.join([ROOT_DIR, LOG_DIR])):
        try:
            fs.mkdir(fs.join([ROOT_DIR, LOG_DIR]))
        except OSError:
            print("Can't create LOG_DIR in func: export_neglected_file_list")
    try:
        fs.touch(export_path)
        log_file = fs.open(export_path, 'w')
        for f in backup_file_list:
            try:
                log_file.write('================================================')
                log_file.write('\n')
                log_file.write(fs.filename(f))
                log_file.write('\n')
                log_file.write(fs.dirname(f))
                log_file.write('\n')
            except OSError:
                print("Can't write export file in func: export_neglected_file_list")
        log_file.close()
    except OSError:
        print("Can't export in func: export_neglected_file_list")
Code Example #2
def load_meta_data(data_src, wiki_src, imdb_src):
    with open(fs.join(data_src, wiki_src), 'rb') as file:
        wiki_meta = pickle.load(file)

    with open(fs.join(data_src, imdb_src), 'rb') as file:
        imdb_meta = pickle.load(file)

    return merge_meta_data(imdb_meta, wiki_meta)
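
The `merge_meta_data` helper called above is not shown in this listing. Purely as an illustration of what such a merge might look like (the project's real implementation may differ), the sketch below concatenates the per-key entries of the two metadata dicts; the dict-of-lists layout with keys such as 'path', 'age' and 'gender' is assumed from the `get_img_array` example further down this page.

def merge_meta_data(imdb_meta, wiki_meta):
    # Hypothetical sketch only: concatenate the per-key entries of both
    # metadata dicts, assuming they share keys like 'path', 'age', 'gender'.
    merged = {}
    for key in imdb_meta:
        merged[key] = list(imdb_meta[key]) + list(wiki_meta.get(key, []))
    return merged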
Code Example #3
def main():

  if not fs.exists(DATA_DST):
    fs.mkdir(DATA_DST)

  meta_all = utils.shuffle_meta_data(utils.load_meta_data(DATA_SRC, WIKI_META_OBJ, IMDB_META_OBJ))

  train, test = utils.split_meta_data(meta_all, TRAIN_TEST_SPLIT)
  train, val = utils.split_meta_data(train, TRAIN_VAL_SPLIT)

  # Free the memory
  del meta_all
  gc.collect()

  print("Converting blocks")

  print(" [train] %i Sapmles. %i Blocks required" % (len(train['path']), math.ceil(len(train['path']) / SAMPLES_PER_SPLIT)))

  for i in range(math.ceil(len(train['path']) / SAMPLES_PER_SPLIT)):
    X_train, y_age, y_gender = utils.get_img_array(train, DATA_SRC, age_classes, img_dim=INPUT_DIM, split=i, num_samples_per_split=SAMPLES_PER_SPLIT)
    np.save(fs.add_suffix(fs.join(DATA_DST, TRAIN_DATA_OBJ), '_%02d' % i), X_train)
    np.save(fs.add_suffix(fs.join(DATA_DST, TRAIN_DATA_OBJ), '_label_age_%02d' % i), y_age)
    np.save(fs.add_suffix(fs.join(DATA_DST, TRAIN_DATA_OBJ), '_label_gender_%02d' % i), y_gender)
    
    # Remove the array from memory
    del X_train
    del y_age
    del y_gender
    gc.collect()

  print(" [val] %i Sapmles. 1 Block forced" % (len(val['path'])))

  X_val, y_age, y_gender = utils.get_img_array(val, DATA_SRC, age_classes, img_dim=INPUT_DIM, num_samples_per_split=len(val['path']))
  np.save(fs.join(DATA_DST, VAL_DATA_OBJ), X_val)
  np.save(fs.add_suffix(fs.join(DATA_DST, VAL_DATA_OBJ), '_label_age'), y_age)
  np.save(fs.add_suffix(fs.join(DATA_DST, VAL_DATA_OBJ), '_label_gender'), y_gender)

  # Remove the array from memory
  del X_val
  del y_age
  del y_gender
  gc.collect()

  print("[test] %i Sapmles. %i Blocks required" % (len(test['path']), math.ceil(len(test['path']) / SAMPLES_PER_SPLIT)))

  for i in range(math.ceil(len(test['path']) / SAMPLES_PER_SPLIT)):
    X_test, y_age, y_gender = utils.get_img_array(test, DATA_SRC, age_classes, img_dim=INPUT_DIM, split=i, num_samples_per_split=SAMPLES_PER_SPLIT)
    np.save(fs.add_suffix(fs.join(DATA_DST, TEST_DATA_OBJ), '_%02d' % i), X_test)
    np.save(fs.add_suffix(fs.join(DATA_DST, TEST_DATA_OBJ), '_label_age_%02d' % i), y_age)
    np.save(fs.add_suffix(fs.join(DATA_DST, TEST_DATA_OBJ), '_label_gender_%02d' % i), y_gender)
    
    # Remove the array from memory
    del X_test
    del y_age
    del y_gender
    gc.collect()
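
For reference, the training blocks written by the loop above can be read back with `np.load`. The lines below are only a sketch under the naming assumptions of this example (block index suffixed as `_%02d`, `TRAIN_DATA_OBJ` not already ending in `.npy` so that `np.save` appended that extension), reusing `np`, `fs`, `DATA_DST` and `TRAIN_DATA_OBJ` from the snippet; this is not code from the original project.

# Hypothetical sketch: read the first saved training block and its labels back.
# np.save above appended '.npy', so the extension is added explicitly here.
i = 0
X_block = np.load(fs.add_suffix(fs.join(DATA_DST, TRAIN_DATA_OBJ), '_%02d' % i) + '.npy')
y_age_block = np.load(fs.add_suffix(fs.join(DATA_DST, TRAIN_DATA_OBJ), '_label_age_%02d' % i) + '.npy')
y_gender_block = np.load(fs.add_suffix(fs.join(DATA_DST, TRAIN_DATA_OBJ), '_label_gender_%02d' % i) + '.npy')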
Code Example #4
def main():

  if not fs.exists(DATA_DST):
    fs.mkdir(DATA_DST)

  meta_all = utils.shuffle_meta_data(utils.load_meta_data(DATA_SRC, WIKI_META_OBJ, IMDB_META_OBJ))

  train, test = utils.split_meta_data(meta_all, TRAIN_TEST_SPLIT)
  train, val = utils.split_meta_data(train, TRAIN_VAL_SPLIT)

  # Free the memory
  del meta_all
  gc.collect()

  print("Converting blocks")

  print(" [train] %i Sapmles" % (train_samples))

  i = 0
  X_train, y_age, y_gender = utils.get_img_array(train, DATA_SRC, age_classes, img_dim=INPUT_DIM, split=i, num_samples_per_split=train_samples)
  np.save(fs.add_suffix(fs.join(DATA_DST, TRAIN_DATA_OBJ), '_%02d' % i), X_train)
  np.save(fs.add_suffix(fs.join(DATA_DST, TRAIN_DATA_OBJ), '_label_age_%02d' % i), y_age)
  np.save(fs.add_suffix(fs.join(DATA_DST, TRAIN_DATA_OBJ), '_label_gender_%02d' % i), y_gender)
    
  # Remove the array from memory
  del X_train
  del y_age
  del y_gender
  gc.collect()

  print(" [val] %i Sapmles" % (val_samples))

  X_val, y_age, y_gender = utils.get_img_array(val, DATA_SRC, age_classes, img_dim=INPUT_DIM, num_samples_per_split=val_samples)
  np.save(fs.join(DATA_DST, VAL_DATA_OBJ), X_val)
  np.save(fs.add_suffix(fs.join(DATA_DST, VAL_DATA_OBJ), '_label_age'), y_age)
  np.save(fs.add_suffix(fs.join(DATA_DST, VAL_DATA_OBJ), '_label_gender'), y_gender)

  # Remove the array from memory
  del X_val
  del y_age
  del y_gender
  gc.collect()

  print("[test] %i Sapmles" % (test_samples))

  i = 0
  X_test, y_age, y_gender = utils.get_img_array(test, DATA_SRC, age_classes, img_dim=INPUT_DIM, split=i, num_samples_per_split=test_samples)
  np.save(fs.add_suffix(fs.join(DATA_DST, TEST_DATA_OBJ), '_%02d' % i), X_test)
  np.save(fs.add_suffix(fs.join(DATA_DST, TEST_DATA_OBJ), '_label_age_%02d' % i), y_age)
  np.save(fs.add_suffix(fs.join(DATA_DST, TEST_DATA_OBJ), '_label_gender_%02d' % i), y_gender)
  
  # Remove the array from memory
  del X_test
  del y_age
  del y_gender
  gc.collect()
Code Example #5
def _remove_test_dir(ROOT_DIR, TARGET_ROOT_DIR, backup_dir_name):
    backup_dir_path = fs.join([ROOT_DIR, TARGET_ROOT_DIR, backup_dir_name])
    try:
        fs.rmdir(backup_dir_path, recursive=True)
        print(backup_dir_path, "was successfully removed")
    except OSError:
        print("ERROR", backup_dir_path, "couldn't be removed by func: _remove_test_dir")
Code Example #6
def create_new_backup_root_dir(ROOT_DIR, TARGET_ROOT_DIR, backup_dir_name):
    backup_dir_path = fs.join([ROOT_DIR, TARGET_ROOT_DIR, backup_dir_name])
    if not fs.exists(backup_dir_path):
        try:
            fs.mkdir(backup_dir_path)
            print(backup_dir_path, "was successfully created")
        except OSError:
            print("Can't create a backup directory in func: create_new_backup_root_dir")
Code Example #7
File: test_addpath.py  Project: sevnote/python-fs
def test_addpath():
    TEST_MODULE = 'test_foo_bar.py'

    fs.touch(fs.join(TEST_DIR, TEST_MODULE))

    with pytest.raises(ImportError):
        import test_foo_bar

    fs.addpath(TEST_DIR)
    import test_foo_bar
Code Example #8
File: fabfile.py  Project: lastrye/sh-install
def make():
	"""Generate the current shell scripts from the templates"""
	clean()

	for _file in fs.find('*.sh', SRC_DIR):
		tplname = _file.replace(SRC_DIR + '/', "")
		dest = fs.join(DIST_DIR, fs.filename(tplname))
		tpl = env.get_template(tplname)
		fs.write(dest, tpl.render())
		print("Writing template %s" % tplname)
Code Example #9
def create_archive_tree(monitoring_dir, ROOT_DIR, TARGET_ROOT_DIR, backup_dir_name, backup_dir_list):
    # Strip the monitored root from each source path so the same tree is rebuilt under the archive
    re_pattern = re.compile(re.escape(monitoring_dir))
    for source_full_path in backup_dir_list:
        source_path = re_pattern.sub('', source_full_path)
        archive_path = fs.join([ROOT_DIR, TARGET_ROOT_DIR, backup_dir_name, source_path])
        if not fs.exists(archive_path):
            try:
                fs.mkdir(archive_path)
            except OSError:
                print(archive_path, "Can't create archive tree in func: create_archive_tree")
Code Example #10
def get_img_array(meta_data,
                  data_src,
                  age_classes,
                  img_dim=(3, 224, 224),
                  split=0,
                  num_samples_per_split=100000,
                  dtype=np.float32):
    i_start = min(split * num_samples_per_split, len(meta_data['path']))
    i_stop = min(split * num_samples_per_split + num_samples_per_split,
                 len(meta_data['path']))
    num_samples = i_stop - i_start

    X = np.zeros((num_samples, img_dim[0], img_dim[1], img_dim[2]),
                 dtype=dtype)
    y_age = np.zeros((num_samples))
    y_gender = np.zeros((num_samples))

    print('  Allocating %s for dataset with shape (%i,%i,%i,%i)' % (sizeof_fmt(
        X.nbytes), num_samples, img_dim[0], img_dim[1], img_dim[2]))

    age_class = lambda x: age_classes.index(
        next(filter(lambda e: x >= e[0] and x <= e[1], age_classes)))

    for i in range(i_start, i_stop):
        y_age[i - i_start] = age_class(meta_data['age'][i])

        # Replace all non valid gender labels with male labels
        y_gender[i - i_start] = meta_data['gender'][i] if int(
            meta_data['gender'][i]) in [0, 1] else 1
        abspath = fs.join(data_src, meta_data['path'][i])

        # Catch errors
        try:
            with Image.open(abspath) as img:
                img = img.resize(img_dim[1:3], RESIZE_TYPE).convert('RGB')
                X[i - i_start] = np.asarray(img, dtype=dtype).transpose(
                    (2, 1, 0)) / 255
        except OSError as e:
            print("Error reading file %s: %s" % (abspath, e))
            continue

    return X, y_age, y_gender
Code Example #11
batchsize = 64
learning_rate = 1e-2
decay_rate = 0.0
l2_reg = 1e-3
early_stopping_rounds = 10
use_class_weights = False

timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S")

MODEL_NAME = 'VGG_16_GENDER_%i_%i_%i' % shp

DATASET_DIR = '/data/imdb-wiki-dataset'
MODEL_DIR = '/data/models'
LOGS_DIR = '/data/logs'

MODEL_PATH = fs.join(MODEL_DIR, '%s_%s.h5' % (MODEL_NAME, timestamp))
LOGS_PATH = fs.join(LOGS_DIR, '%s_%s.txt' % (MODEL_NAME, timestamp))

print("Loading Dataset ...")
# Initialize the datasets
ds_train = Dataset(DATASET_DIR, 'train')
ds_val = Dataset(DATASET_DIR, 'val')

# Get the class names for this dataset
class_names = ds_train.label_names

# Initialize the preprocessing pipeline
print("Setting up preprocessing ...")
tform = get_normalization_transform(
  means=ds_train.get_mean(per_channel=True),
  stds=ds_train.get_stddev(per_channel=True)
)
Code Example #12
File: test_join.py  Project: sevnote/python-fs
def test_join_array():
    assert fs.join([os.sep, 'etc', 'var']) == os.sep + 'etc' + os.sep + 'var'
    assert fs.join([os.sep, 'etc', 'var/']) == os.sep + 'etc' + os.sep + 'var/'
Code Example #13
File: test_join.py  Project: sevnote/python-fs
def test_join_arguments():
    assert fs.join(os.sep, 'etc', 'var') == os.sep + 'etc' + os.sep + 'var'
    assert fs.join(os.sep, 'etc', 'var/') == os.sep + 'etc' + os.sep + 'var/'
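
Taken together, the two tests above show that `fs.join` accepts either a single list of path parts or the parts as separate arguments; both forms appear in the examples on this page. A minimal illustration (POSIX paths assumed):

# Both calls return '/etc/var' on POSIX systems.
fs.join(['/', 'etc', 'var'])   # list form, as used in examples #1, #5, #6, #9 and #15
fs.join('/', 'etc', 'var')     # argument form, as used in examples #2, #8 and #14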
Code Example #14
File: fabfile.py  Project: lastrye/sh-install
from __future__ import print_function

from fabric.api import task, local
from jinja2 import Environment, FileSystemLoader

import fs

ROOT_DIR = fs.cwd()
SRC_DIR = fs.join(ROOT_DIR, 'src')
DIST_DIR = fs.join(ROOT_DIR, 'dist')

env = Environment()
env.loader = FileSystemLoader(SRC_DIR)

@task
def clean():
	"""Clean the dist/ directory"""
	if (fs.isdir(DIST_DIR)):
		fs.truncate(DIST_DIR)

@task
def make():
	"""Generate the current shell scripts from the templates"""
	clean()

	for _file in fs.find('*.sh', SRC_DIR):
		tplname = _file.replace(SRC_DIR + '/', "")
		dest = fs.join(DIST_DIR, fs.filename(tplname))
		tpl = env.get_template(tplname)
		fs.write(dest, tpl.render())
		print("Writing template %s" % tplname)
Code Example #15
def get_monitoring_dir(ROOT_DIR, MONITORING_ROOT_DIR):
    return fs.join([ROOT_DIR, MONITORING_ROOT_DIR])
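
Taken together, the backup helpers shown in examples #1, #5, #6, #9 and #15 could be wired up roughly as follows. This is only an illustrative sketch: every constant and the `backup_dir_list`/`backup_file_list` inputs are hypothetical placeholders, not values from the original project.

# Hypothetical wiring of the helpers above; all paths are placeholders.
ROOT_DIR = '/srv/backup-tool'
MONITORING_ROOT_DIR = 'monitoring'
TARGET_ROOT_DIR = 'archive'
LOG_DIR = 'logs'
backup_dir_name = 'backup_run'

monitoring_dir = get_monitoring_dir(ROOT_DIR, MONITORING_ROOT_DIR)
create_new_backup_root_dir(ROOT_DIR, TARGET_ROOT_DIR, backup_dir_name)

# The directory and file lists would normally come from scanning monitoring_dir
# (e.g. with fs.find, as in example #8); empty placeholders are used here.
backup_dir_list = []
backup_file_list = []

create_archive_tree(monitoring_dir, ROOT_DIR, TARGET_ROOT_DIR, backup_dir_name, backup_dir_list)
export_neglected_file_list(monitoring_dir, ROOT_DIR, LOG_DIR, backup_file_list)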