Example #1
def train(datasets, options, ctx):
    images_path = datasets['images']

    print('Reading expected image size from starting checkpoint')
    dnnlib.tflib.init_tf()
    _, _, Gs = pickle.load(open(options['checkpoint'], 'rb'))
    width, height = Gs.output_shape[::-1][:2]  # output_shape is [batch, channels, height, width]

    print('Resizing images')
    tmp_resized = tempfile.TemporaryDirectory()
    preprocess_images(images_path, tmp_resized.name, width, height, options['crop_method'])

    print('Converting dataset to TFRecord')
    tmp_dataset = tempfile.TemporaryDirectory()
    create_from_images(tmp_dataset.name, tmp_resized.name, 1)

    print('Creating training config')
    result_dir = runway.utils.generate_uuid()
    os.makedirs(result_dir)
    kwargs = create_training_config(tmp_dataset.name, options['checkpoint'], result_dir, **options)
    kwargs.update(max_steps=options['max_steps'])
    gen = training_loop(**kwargs)

    for (step, metrics, samples, checkpoints) in gen:
        ctx.step = step
        for k, v in metrics.items():
            ctx.metrics[k] = v
        for k, v in samples.items():
            ctx.samples.add(k, v)
        for k, v in checkpoints.items():
            ctx.checkpoints.add(k, v)
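A minimal sketch of the invocation this Runway-style hook expects; the checkpoint path and option values are hypothetical, and ctx stands in for the framework-provided training context:

# Hypothetical invocation; every key below is read somewhere in train() above.
datasets = {'images': 'data/faces'}               # folder of training images
options = {'checkpoint': 'network-snapshot.pkl',  # hypothetical pickle path
           'crop_method': 'center',               # assumed crop_method value
           'max_steps': 1000}
train(datasets, options, ctx)                     # ctx is supplied by the framework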
Example #2
def selfie2anime():
    img_id = os.environ['id']
    result_id = os.environ['result']
    

    parser = get_parser()
    args = parser.parse_args("--phase test".split())

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        #sess.reuse_variables()
        gan = UGATIT(sess, args)

        # build graph
        gan.build_model()

        # download the target image (img_path, images_bucket, and record_path
        # are module-level globals defined elsewhere in the source project)
        download_path = os.path.join(img_path, img_id)

        download_image(images_bucket, img_id, dest=download_path)
        dataset_tool.create_from_images(record_path, img_path, True)
        # os.remove(del_record)
        
        img = gan.infer(download_path)

        image_url = upload_image(img, result_id)

    return download_path, img
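The handler pulls its work items from environment variables; a sketch of that contract (images_bucket, img_path, and record_path are module-level globals in the source project):

os.environ['id'] = 'selfie_001.jpg'     # hypothetical key of the input image in images_bucket
os.environ['result'] = 'anime_001.jpg'  # hypothetical key for the uploaded result
download_path, img = selfie2anime()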
Example #3
def project(model, inputs):
    im = inputs['projectionImage']
    # create the working directories if they don't already exist
    for d in ('./projection', './projection/imgs',
              './projection/records', './projection/out'):
        os.makedirs(d, exist_ok=True)

    if os.path.isfile('./projection/imgs/project.png'):
        os.remove('./projection/imgs/project.png')

    for f in os.listdir('./projection/records/'):
        if os.path.isfile(os.path.join('./projection/records/', f)):
            os.remove(os.path.join('./projection/records/', f))

    im.save('./projection/imgs/project.png')

    dataset_tool.create_from_images("./projection/records/", "./projection/imgs/", True)

    output = get_projected_real_images("records", "./projection/", 1, 10,
                                       inputs['steps'], model)

    # return the last item
    return output[-1]
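A sketch of a caller for this handler, assuming inputs['projectionImage'] is a PIL image (the function calls .save() on it) and 'steps' is the projector iteration count; the filename is hypothetical:

from PIL import Image

inputs = {'projectionImage': Image.open('face.png'),  # hypothetical input file
          'steps': 1000}
result = project(model, inputs)  # model: a loaded StyleGAN network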
Example #4
def project_image(proj, src_file, dst_dir, tmp_dir):

    data_dir = '%s/dataset' % tmp_dir
    if os.path.exists(data_dir):
        shutil.rmtree(data_dir)
    image_dir = '%s/images' % data_dir
    tfrecord_dir = '%s/tfrecords' % data_dir
    os.makedirs(image_dir, exist_ok=True)
    shutil.copy(src_file, image_dir + '/')
    dataset_tool.create_from_images(tfrecord_dir, image_dir, shuffle=0)
    dataset_obj = dataset.load_dataset(data_dir=data_dir,
                                       tfrecord_dir='tfrecords',
                                       max_label_size=0,
                                       repeat=False,
                                       shuffle_mb=0)

    print('Projecting image "%s"...' % os.path.basename(src_file))
    images, _labels = dataset_obj.get_minibatch_np(1)
    images = misc.adjust_dynamic_range(images, [0, 255], [-1, 1])
    proj.start(images)

    while proj.get_cur_step() < proj.num_steps:
        print('\r%d / %d ... ' % (proj.get_cur_step(), proj.num_steps),
              end='',
              flush=True)
        proj.step()

    print('\r%-30s\r' % '', end='', flush=True)

    os.makedirs(dst_dir, exist_ok=True)
    filename = os.path.join(dst_dir, os.path.basename(src_file)[:-4] + '.png')
    misc.save_image_grid(proj.get_images(), filename, drange=[-1, 1])
    filename = os.path.join(dst_dir, os.path.basename(src_file)[:-4] + '.npy')
    np.save(filename, proj.get_dlatents()[0])
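Example #4 only defines the helper; a sketch of a caller, borrowing the projector setup from Examples #10 and #16 (the checkpoint path and directories are hypothetical):

import pretrained_networks
import projector

_G, _D, Gs = pretrained_networks.load_networks('network-snapshot.pkl')  # hypothetical pickle
proj = projector.Projector()
proj.set_network(Gs)
project_image(proj, src_file='faces/portrait.png', dst_dir='out', tmp_dir='stylegan2-tmp')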
Example #5
def main():
    ######### Search the latent space for a look-alike ###########

    # make directory
    os.makedirs('my/pic', exist_ok=True)

    # load the face-landmark detection model
    LANDMARKS_MODEL_URL = 'http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2'
    landmarks_model_path = unpack_bz2(
        get_file('shape_predictor_68_face_landmarks.dat.bz2',
                 LANDMARKS_MODEL_URL,
                 cache_subdir='temp'))

    # align and crop the face images
    RAW_IMAGES_DIR = 'sample/pic'
    ALIGNED_IMAGES_DIR = 'my/pic'

    landmarks_detector = LandmarksDetector(landmarks_model_path)
    for img_name in os.listdir(RAW_IMAGES_DIR):
        raw_img_path = os.path.join(RAW_IMAGES_DIR, img_name)
        for i, face_landmarks in enumerate(
                landmarks_detector.get_landmarks(raw_img_path), start=1):
            face_img_name = '%s_%02d.png' % (os.path.splitext(img_name)[0], i)
            aligned_face_path = os.path.join(ALIGNED_IMAGES_DIR, face_img_name)
            image_align(raw_img_path, aligned_face_path, face_landmarks)

    #display_pic('./my/pic/*.*')
    dataset_tool.create_from_images(tfrecord_dir='./my/dataset',
                                    image_dir='./my/pic',
                                    shuffle=0)

    vec_syn = project_real_images(1)  # argument: number of images used when the multi-resolution dataset was created
    #display_pic('./my/pic/*.*')  # show the target images
    #display(vec_syn)  # images generated from the recovered latent vectors

    # save the recovered latent vectors
    #os.makedirs('my/vector', exist_ok=True)
    #np.save('my/vector/vec_syn', vec_syn)

    ##################### Smile editing (style mixing) starts here ####################

    # load the recovered latent vectors
    #vec_syn = np.load('my/vector/vec_syn.npy')

    # load 7 pre-prepared latent vectors of people with different mouth expressions
    vec_hashikan = np.load('sample/vectors/vec_hashikan.npy')

    # stack them together...
    vec_smile = np.vstack((vec_syn, vec_hashikan))

    # and mix!
    style_mixing(vec_smile, [4, 5], 1.0)
Example #6
def prepare(self,
            tfrecord_dir,
            shuffle=True,
            with_sub_dirs=False,
            ignore_labels=1):
    if with_sub_dirs:
        out_path = resize_dirs(self.path, 'dataset/resized', dim=self.dim)
        create_from_image_folders(tfrecord_dir, out_path, shuffle,
                                  ignore_labels)
    else:
        print('resizing images ...')
        out_path = resize(self.path, dim=self.dim)
        print('creating records ...')
        create_from_images(tfrecord_dir, out_path, shuffle=shuffle)  # honor the caller's shuffle flag
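A usage sketch, assuming the enclosing class stores the source image folder in self.path and the target resolution in self.dim (the class name below is hypothetical):

prep = DatasetPreparer(path='data/raw_images', dim=512)  # hypothetical class name
prep.prepare('datasets/my_tfrecords', shuffle=True)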
Example #7
def project_image(proj, src_file, dst_dir, tmp_dir, video=False):

    data_dir = '%s/dataset' % tmp_dir  # ./stylegan2-tmp/dataset
    if os.path.exists(data_dir):
        shutil.rmtree(data_dir)
    image_dir = '%s/images' % data_dir  # ./stylegan2-tmp/dataset/images
    tfrecord_dir = '%s/tfrecords' % data_dir  # ./stylegan2-tmp/dataset/tfrecords
    os.makedirs(image_dir, exist_ok=True)
    # copy the source image into ./stylegan2-tmp/dataset/images
    shutil.copy(src_file, image_dir + '/')
    # write temporary tfrecord files under ./stylegan2-tmp/dataset/tfrecords;
    # they serialize the image shape and data at each LOD. For example, a
    # 1024x1024 image produces tfr files numbered from 10 down to 2:
    # tfrecords-r10.tfrecords ... tfrecords-r05.tfrecords ...
    dataset_tool.create_from_images(tfrecord_dir, image_dir, shuffle=0)
    # the TFRecordDataset class (defined in dataset.py) loads a dataset from a
    # set of .tfrecords files; load_dataset is a helper that builds the dataset
    # object (instantiating TFRecordDataset) into dataset_obj
    dataset_obj = dataset.load_dataset(
        data_dir=data_dir, tfrecord_dir='tfrecords',
        max_label_size=0, repeat=False, shuffle_mb=0
    )

    # produce the target image(s) for the optimization loop
    print('Projecting image "%s"...' % os.path.basename(src_file))
    # fetch the next minibatch of size 1 as a NumPy array
    images, _labels = dataset_obj.get_minibatch_np(1)
    # rescale images from [0, 255] to [-1, 1]
    images = misc.adjust_dynamic_range(images, [0, 255], [-1, 1])
    # initialize the projector: start
    proj.start(images)
    if video:
        video_dir = '%s/video' % tmp_dir
        os.makedirs(video_dir, exist_ok=True)
    while proj.get_cur_step() < proj.num_steps:
        print('\r%d / %d ... ' % (proj.get_cur_step(), proj.num_steps), end='', flush=True)
        # one projector optimization iteration: step
        proj.step()
        # if the video option is set, save each intermediate image to ./stylegan2-tmp/video
        if video:
            filename = '%s/%08d.png' % (video_dir, proj.get_cur_step())
            misc.save_image_grid(proj.get_images(), filename, drange=[-1, 1])
    print('\r%-30s\r' % '', end='', flush=True)

    # save the final image and the dlatents file in the destination directory
    os.makedirs(dst_dir, exist_ok=True)
    filename = os.path.join(dst_dir, os.path.basename(src_file)[:-4] + '.png')
    misc.save_image_grid(proj.get_images(), filename, drange=[-1, 1])
    filename = os.path.join(dst_dir, os.path.basename(src_file)[:-4] + '.npy')
    np.save(filename, proj.get_dlatents()[0])
Example #8
def predict(number, img_path):
    # load the face-landmark detection model
    LANDMARKS_MODEL_URL = 'http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2'
    landmarks_model_path = unpack_bz2(
        get_file('shape_predictor_68_face_landmarks.dat.bz2',
                 LANDMARKS_MODEL_URL,
                 cache_subdir='temp'))

    # align and crop the face image
    raw_img_path = img_path  # deleted later
    print(raw_img_path)
    img_name = raw_img_path.replace('images/', '')
    DATAOUT_DIR = img_path.replace('.jpg', '')  # images/2020xxxx, deleted later (= my)
    ALIGNED_IMAGES_DIR = DATAOUT_DIR + '/pic'
    os.makedirs(ALIGNED_IMAGES_DIR, exist_ok=True)
    TFRECORD_DIR = DATAOUT_DIR + '/dataset'

    landmarks_detector = LandmarksDetector(landmarks_model_path)
    for i, face_landmarks in enumerate(
            landmarks_detector.get_landmarks(raw_img_path), start=1):
        face_img_name = '%s_%02d.png' % (os.path.splitext(img_name)[0], i)
        aligned_face_path = os.path.join(ALIGNED_IMAGES_DIR, face_img_name)
        image_align(raw_img_path, aligned_face_path, face_landmarks)

    dataset_tool.create_from_images(tfrecord_dir=TFRECORD_DIR,
                                    image_dir=ALIGNED_IMAGES_DIR,
                                    shuffle=0)

    vec_syn = my_project_real_images(
        1, data_dir=DATAOUT_DIR)  # argument: number of images used when the multi-resolution dataset was created

    ##################### Smile editing (style mixing) starts here ####################
    # load 7 pre-prepared latent vectors of people with different mouth expressions
    vec_hashikan = np.load('sample/vectors/vec_hashikan.npy')
    # stack them together...
    vec_smile = np.vstack((vec_syn, vec_hashikan))
    # and mix!
    result_path = my_style_mixing(vec_smile, [4, 5],
                                  1.0,
                                  my_data_dir=DATAOUT_DIR,
                                  number=number)

    return result_path
Example #9
def prepare(self,
            tfrecord_dir,
            text_dir,
            shuffle=False,
            with_sub_dirs=False,
            ignore_labels=0,
            with_text=True,
            embed_dim=430):
    if with_sub_dirs:
        image_dir = resize_dirs(self.path, 'dataset/resized', dim=self.dim)
        create_image_and_textv2(tfrecord_dir,
                                image_dir,
                                text_dir,
                                shuffle,
                                ignore_labels,
                                self.encoder,
                                model_type=self.model_type,
                                use_chars=self.use_chars,
                                embed_dim=embed_dim,
                                with_sub_dirs=with_sub_dirs)
    elif with_text:
        image_dir = resize(self.path, dim=self.dim)
        create_image_and_textv2(tfrecord_dir,
                                image_dir,
                                text_dir,
                                shuffle,
                                ignore_labels,
                                self.encoder,
                                model_type=self.model_type,
                                use_chars=self.use_chars,
                                embed_dim=embed_dim,
                                with_sub_dirs=with_sub_dirs)
    else:
        print('resizing images ...')
        out_path = resize(self.path, dim=self.dim)
        print('creating records ...')
        create_from_images(tfrecord_dir, out_path, shuffle=shuffle)  # honor the caller's shuffle flag
Example #10
def project_images(Gs,
                   images_dir,
                   tfrecord_dir,
                   data_dir,
                   num_snapshots,
                   pure_projector=False):
    """setup projector"""
    print('Setting up projector')
    proj = projector.Projector()
    proj.set_network(Gs)

    # generate tfrecords
    nb_images = dataset_tool.create_from_images(str(tfrecord_dir),
                                                str(images_dir), True)

    # loading images from tfrecords
    dataset_obj = training.dataset.load_dataset(tfrecord_dir=tfrecord_dir,
                                                max_label_size=0,
                                                verbose=True,
                                                repeat=False,
                                                shuffle_mb=0)
    assert dataset_obj.shape == Gs.output_shape[1:]

    # project all loaded images
    print('=======================')
    for image_idx in tqdm(range(nb_images)):
        print(f'Projecting image {image_idx + 1}/{nb_images}')

        images, _labels = dataset_obj.get_minibatch_np(1)
        images = training.misc.adjust_dynamic_range(images, [0, 255], [-1, 1])

        run_path = data_dir / f'out_{image_idx}'
        run_path.mkdir()
        run_projector.project_image(proj,
                                    targets=images,
                                    png_prefix=dnnlib.make_run_dir_path(
                                        str(run_path / 'image_')),
                                    num_snapshots=num_snapshots)
Example #11
        zs = np.random.normal(size=(32, 512))
        images = classifier.gens(zs)
        testresults = classifier.test_by_images(images)
        for j in range(32):
            if np.mean(testresults[j]) > 0.667:
                cv2.imwrite(os.path.join(outputpath, '{}.png'.format(passed)),
                            images[j] * 255)
                passed += 1
                if passed == 20000:
                    flag = False
                    break

# generate tfrecord for StyleGAN finetuning
from dataset_tool import create_from_images
create_from_images(
    os.path.join(outputroot,
                 '{}_filtered_tfrecord_{}'.format(argsk.datatype, tid)),
    outputpath, 1)

# Run StyleGAN finetuning

import copy
import dnnlib
from dnnlib import EasyDict

desc = 'sgan'  # Description string included in result subdir name.
train = EasyDict(run_func_name='training.training_loop.training_loop'
                 )  # Options for training loop.
G = EasyDict(func_name='training.networks_stylegan.G_style'
             )  # Options for generator network.
D = EasyDict(func_name='training.networks_stylegan.D_basic'
             )  # Options for discriminator network.
Example #12
]

# Align Faces
align_images.main('/in', '/out')

num_steps = 1000  #@param {type:"number"}
truncation_psi = 1  #@param {type:"slider", min:0, max:1, step:0.01}

# Get Filenames
files = []
for _, _, f in os.walk(r'/out'):
    for file in f:
        files.append(os.path.splitext(file)[0])

# Convert uploaded images to TFRecords
dataset_tool.create_from_images("/records/", "/out/", True)

# Get number of images
num_images = sum(len(files) for _, _, files in os.walk(r'/out/'))


# Run the projector
def project_real_images(dataset_name, data_dir, num_images, num_snapshots):
    proj = projector.Projector(num_steps)
    proj.set_network(Gs)
    print('Loading images from "%s"...' % dataset_name)

    dataset_obj = training.dataset.load_dataset(data_dir=data_dir,
                                                tfrecord_dir=dataset_name,
                                                max_label_size=0,
                                                verbose=True,
Example #13
import dataset_tool
from training.dataset import load_dataset

# create TFRecords from an image directory

tfrecord_dir = "../databases/replay-attack/blink_dataset_replay"
image_dir = '../databases/replay-attack/faces/test_faces/**'
shuffle = True
dataset_tool.create_from_images(tfrecord_dir, image_dir, shuffle)
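The load_dataset import above is unused in this fragment; a sketch of reading the fresh records back, using the keyword arguments seen in Examples #4 and #10:

dataset_obj = load_dataset(tfrecord_dir=tfrecord_dir,
                           max_label_size=0,
                           repeat=False,
                           shuffle_mb=0)
images, _labels = dataset_obj.get_minibatch_np(1)  # one image as a NumPy array, NCHW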
Example #14
LANDMARKS_MODEL_URL = 'http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2'
landmarks_model_path = unpack_bz2(get_file('shape_predictor_68_face_landmarks.dat.bz2',
                                            LANDMARKS_MODEL_URL, cache_subdir='temp'))

# align and crop the face images
RAW_IMAGES_DIR = 'sample/pic'
ALIGNED_IMAGES_DIR = 'my/pic'

landmarks_detector = LandmarksDetector(landmarks_model_path)
for img_name in os.listdir(RAW_IMAGES_DIR):
    raw_img_path = os.path.join(RAW_IMAGES_DIR, img_name)
    for i, face_landmarks in enumerate(landmarks_detector.get_landmarks(raw_img_path), start=1):
        face_img_name = '%s_%02d.png' % (os.path.splitext(img_name)[0], i)
        aligned_face_path = os.path.join(ALIGNED_IMAGES_DIR, face_img_name)
        image_align(raw_img_path, aligned_face_path, face_landmarks)
        
#display_pic('./my/pic/*.*')

dataset_tool.create_from_images(tfrecord_dir='./my/dataset', image_dir='./my/pic', shuffle=0)

vec_syn = project_real_images(5)  # argument: number of images used when the multi-resolution dataset was created
#display_pic('./my/pic/*.*')  # show the target images
#display(vec_syn)  # images generated from the recovered latent vectors

# save the recovered latent vectors
os.makedirs('sample/vectors', exist_ok=True)
np.save('sample/vectors/vec_hashikan', vec_syn)

# load the recovered latent vectors
vec_syn = np.load('sample/vectors/vec_hashikan.npy')
print(vec_syn.shape)
Example #15
def prepare(self, records_path):
    print('resizing images ...')
    out_path = resize(self.path, dim=self.dim)
    print('creating records ...')
    create_from_images(records_path, out_path, shuffle=True)
Example #16
# Convert uploaded images to TFRecords
import dataset_tool
import pretrained_networks
import dnnlib
import dnnlib.tflib as tflib

dataset_tool.create_from_images(
    "/zhome/ca/6/92701/Desktop/Master_Thesis/stylegan2/rl_images/256/image_for_latents/records/",
    "/zhome/ca/6/92701/Desktop/Master_Thesis/stylegan2/rl_images/256/image_for_latents/images/",
    True)

# Run the projector
import run_projector
import projector
import training.dataset
import training.misc
import os

_G, _D, Gs = pretrained_networks.load_networks(
    "/zhome/ca/6/92701/Desktop/Master_Thesis/stylegan2/Old_SG2_pkl/network-snapshot-012820.pkl"
)


def project_real_images(dataset_name, data_dir, num_images, num_snapshots):
    proj = projector.Projector()
    proj.set_network(Gs)

    print('Loading images from "%s"...' % dataset_name)
    dataset_obj = training.dataset.load_dataset(data_dir=data_dir,
                                                tfrecord_dir=dataset_name,
                                                max_label_size=0,
Example #17
# The directories only exist on the VM, since the images are too big to push to git.
# Convert the images to tfrecords and add labels (style + genre) as well.
import dataset_tool as dt
import preprocess

# resize images and then save them
corvett = resize_images.Converter(image_resolution=(512, 512),
                                  file='subset_images2')
corvett.convert()

# load images from the saved folder and convert to tfrecords
dt.create_from_images(
    'datasets//subset_images_tfr',
    '..//..//wikiart-master//wikiart-saved//subset_images_resized', True)
Example #18
def easygen_train(model_path,
                  images_path,
                  dataset_path,
                  start_kimg=7000,
                  max_kimg=25000,
                  schedule='',
                  seed=1000):
    #import stylegan
    #from stylegan import config
    ##from stylegan import dnnlib
    #from stylegan.dnnlib import EasyDict

    #images_dir = '/content/raw'
    #max_kimg = 25000
    #start_kimg = 7000
    #schedule = ''
    #model_in = '/content/karras2019stylegan-cats-256x256.pkl'

    #dataset_dir = '/content/stylegan_dataset' #os.path.join(cwd, 'cache', 'stylegan_dataset')

    import config
    config.data_dir = '/content/datasets'
    config.result_dir = '/content/results'  # read back as config.result_dir below
    config.cache_dir = '/content/cache'
    config.run_dir_ignore = [
        '/content/results', '/content/datasets', '/content/cache'
    ]  # read back as config.run_dir_ignore below
    import copy
    import math  # needed for math.pow in the schedule fallback below
    import dnnlib
    from dnnlib import EasyDict
    from metrics import metric_base
    # Prep dataset
    import dataset_tool
    print("prepping dataset...")
    dataset_tool.create_from_images(tfrecord_dir=dataset_path,
                                    image_dir=images_path,
                                    shuffle=False)
    # Set up training parameters
    desc = 'sgan'  # Description string included in result subdir name.
    train = EasyDict(run_func_name='training.training_loop.training_loop'
                     )  # Options for training loop.
    G = EasyDict(func_name='training.networks_stylegan.G_style'
                 )  # Options for generator network.
    D = EasyDict(func_name='training.networks_stylegan.D_basic'
                 )  # Options for discriminator network.
    G_opt = EasyDict(beta1=0.0, beta2=0.99,
                     epsilon=1e-8)  # Options for generator optimizer.
    D_opt = EasyDict(beta1=0.0, beta2=0.99,
                     epsilon=1e-8)  # Options for discriminator optimizer.
    G_loss = EasyDict(func_name='training.loss.G_logistic_nonsaturating'
                      )  # Options for generator loss.
    D_loss = EasyDict(func_name='training.loss.D_logistic_simplegp',
                      r1_gamma=10.0)  # Options for discriminator loss.
    dataset = EasyDict()  # Options for load_dataset().
    sched = EasyDict()  # Options for TrainingSchedule.
    grid = EasyDict(
        size='1080p',
        layout='random')  # Options for setup_snapshot_image_grid().
    #metrics       = [metric_base.fid50k]                                                  # Options for MetricGroup.
    submit_config = dnnlib.SubmitConfig()  # Options for dnnlib.submit_run().
    tf_config = {'rnd.np_random_seed': seed}  # Options for tflib.init_tf().
    # Dataset
    desc += '-custom'
    dataset = EasyDict(tfrecord_dir=dataset_path)
    train.mirror_augment = True
    # Number of GPUs.
    desc += '-1gpu'
    submit_config.num_gpus = 1
    sched.minibatch_base = 4
    sched.minibatch_dict = {
        4: 128,
        8: 128,
        16: 128,
        32: 64,
        64: 32,
        128: 16,
        256: 8,
        512: 4
    }  #{4: 256, 8: 256, 16: 128, 32: 64, 64: 32, 128: 16, 256: 16}
    # Default options.
    train.total_kimg = max_kimg
    sched.lod_initial_resolution = 8
    sched.G_lrate_dict = {128: 0.0015, 256: 0.002, 512: 0.003, 1024: 0.003}
    sched.D_lrate_dict = EasyDict(sched.G_lrate_dict)
    # schedule
    schedule_dict = {
        4: 160,
        8: 140,
        16: 120,
        32: 100,
        64: 80,
        128: 60,
        256: 40,
        512: 30,
        1024: 20
    }  #{4: 2, 8:2, 16:2, 32:2, 64:2, 128:2, 256:2, 512:2, 1024:2} # Runs faster for small datasets
    if len(schedule) >= 5 and schedule[0] == '{' and schedule[-1] == '}' and ':' in schedule:
        # is schedule a string of a dict?
        try:
            temp = eval(schedule)
            schedule_dict = dict(temp)
            # assert: it is a dict
        except:
            pass
    elif len(schedule) > 0:
        # is schedule an int?
        try:
            schedule_int = int(schedule)
            #assert: schedule is an int
            schedule_dict = {}
            for i in range(1, 10):
                schedule_dict[int(math.pow(2, i + 1))] = schedule_int
        except:
            pass
    print('schedule:', str(schedule_dict))
    sched.tick_kimg_dict = schedule_dict
    # resume kimg
    resume_kimg = start_kimg
    # path to model
    resume_run_id = model_path
    # tick snapshots
    image_snapshot_ticks = 1
    network_snapshot_ticks = 1
    # Submit run
    kwargs = EasyDict(train)
    kwargs.update(G_args=G,
                  D_args=D,
                  G_opt_args=G_opt,
                  D_opt_args=D_opt,
                  G_loss_args=G_loss,
                  D_loss_args=D_loss)
    kwargs.update(dataset_args=dataset,
                  sched_args=sched,
                  grid_args=grid,
                  tf_config=tf_config)
    kwargs.update(resume_kimg=resume_kimg, resume_run_id=resume_run_id)
    kwargs.update(image_snapshot_ticks=image_snapshot_ticks,
                  network_snapshot_ticks=network_snapshot_ticks)
    kwargs.submit_config = copy.deepcopy(submit_config)
    kwargs.submit_config.run_dir_root = dnnlib.submission.submit.get_template_from_path(
        config.result_dir)
    kwargs.submit_config.run_dir_ignore += config.run_dir_ignore
    kwargs.submit_config.run_desc = desc
    dnnlib.submit_run(**kwargs)
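A minimal sketch of driving easygen_train, reusing the paths from the commented-out defaults at the top of the function (all hypothetical):

easygen_train(model_path='/content/karras2019stylegan-cats-256x256.pkl',
              images_path='/content/raw',
              dataset_path='/content/stylegan_dataset',
              start_kimg=7000,
              max_kimg=25000)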
Example #19
    for i in range(len(zs) - 1):
        for index in range(steps):
            fraction = index / float(steps)
            out.append(zs[i + 1] * fraction + zs[i] * (1 - fraction))
    return out


# Taken from https://github.com/alexanderkuk/log-progress
def log_progress(sequence, every=1, size=None, name='Items'):
    print()


# Convert uploaded images to TFRecords
import dataset_tool
print(dataset_tool.__file__)
dataset_tool.create_from_images("./projection/records/", "./projection/imgs/",
                                True)

# Run the projector
import run_projector
import projector
import dream_projector

import importlib
importlib.reload(dream_projector)

import training.dataset
import training.misc
import os


def project_real_images(dataset_name, data_dir, num_images, num_snapshots):