Example #1
def launch():
    global current_render_distance, current_username, current_process
    launch_button.state(['disabled'])
    if current_process is None or current_process.poll() is not None:
        current_process = launcher.run(get_features(),
                                       current_render_distance.get(),
                                       current_username.get())
    return 0
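For context, launch() disables the button and only spawns a new process when the previous one has exited. Below is a minimal sketch of how such a handler could be wired into a plain tkinter/ttk UI; the widget names and the polling loop are illustrative assumptions, not part of the original snippet:

import tkinter as tk
from tkinter import ttk

root = tk.Tk()
launch_button = ttk.Button(root, text="Launch", command=launch)
launch_button.pack()

def poll_process():
    # Illustrative: re-enable the button once the child process exits.
    if current_process is not None and current_process.poll() is not None:
        launch_button.state(['!disabled'])
    root.after(500, poll_process)

poll_process()
root.mainloop()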
Example #2
def sequentialStep():
  print('Starting with ', globalvars.appName)

  # Create child processes, one per Sniper run; each run's sim.out is
  # saved in its own directory.
  print('++Launcher++')
  launcher.run(permutations, parameterNames, False, True)

  # Iterate over the sim.out and macpat files in the relevant directories to
  # collect stats; they are logged to 'efficiency.txt' in the app directory.
  print('++Stat-Collection++')
  stats.collectStats(permutations)

  # Now that we have data for all the configurations, select the 'good' ones.
  print('++Selection++')
  goodConfigs = selection.pickGood([x[1:] for x in parameters])
  goodConfigDict[globalvars.appName] = goodConfigs
Example #3
def parallelDetailed(appName, pinballLoc, outputDirBase):
  # log the output of the process to a specific file
  sys.stdout = open(outputDirBase+"plog.out", "w")
  appfeatures = []

  # append the names of the features
  appfeatures.append(featureNames)

  goodConfigs = goodConfigDict[appName]

  # now that we have the good configs, we need to launch sniper runs again for
  # these configs with detailed stat (counter-data) collection enabled
  #print('++Detailed-Run++')
  launcher.run(goodConfigs, parameterNames, True, False, pinballLoc, outputDirBase)

  # from the detailed runs, we now extract the feature vectors - this forms the
  # training data. Feature vectors are logged to a file
  #print('++Extraction++')
  featureGeneration.extract(goodConfigs, appfeatures, outputDirBase)
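Examples #2 and #3 together form a two-phase pipeline: a broad sweep over all parameter permutations, then detailed counter collection for the selected configs only. A hedged sketch of a driver tying the two phases together; the apps list and the use of multiprocessing here are assumptions for illustration, not from the original project:

from multiprocessing import Process

# Hypothetical: one (appName, pinballLoc, outputDirBase) tuple per app.
apps = [('appA', '/pinballs/appA/', '/results/appA/')]

for app_name, _, _ in apps:
    globalvars.appName = app_name
    sequentialStep()   # sweep, stat collection, selection

# Detailed runs for the selected configs, one child process per app.
workers = [Process(target=parallelDetailed, args=app) for app in apps]
for w in workers:
    w.start()
for w in workers:
    w.join()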
Example #4
# Launcher
# http://blog.tvalacarta.info/plugin-xbmc/tvalacarta/
#------------------------------------------------------------

# Constants
__plugin__  = "tvalacarta"
__author__  = "tvalacarta"
__url__     = "http://blog.tvalacarta.info/plugin-xbmc/tvalacarta/"
__date__    = "1 Septiembre 2010"
__version__ = "2.6"

import os
import sys
import xbmc

xbmc.output("[default.py] tvalacarta init...")

# Set up the directories containing libraries
#librerias = xbmc.translatePath( os.path.join( os.getcwd(), 'resources', 'lib' ) )
#sys.path.append (librerias)
#librerias = xbmc.translatePath( os.path.join( os.getcwd(), 'channels' ) )
#sys.path.append (librerias)
#librerias = xbmc.translatePath( os.path.join( os.getcwd(), 'servers' ) )
#sys.path.append (librerias)
librerias = xbmc.translatePath( os.path.join( os.getcwd(), 'youtubeAPI' ) )
sys.path.append (librerias)

# Run the main program
import launcher
launcher.run()
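The commented-out lines above repeat the same pattern for several library directories; a compact equivalent, assuming the same XBMC-era xbmc.translatePath API:

for parts in (('resources', 'lib'), ('channels',), ('servers',), ('youtubeAPI',)):
    sys.path.append(xbmc.translatePath(os.path.join(os.getcwd(), *parts)))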
Example #5
                               f.detach().cpu()],
                              dim=3)
                canvas.append(r)
        return canvas

    canvas = make(paths[:40])
    canvas = torch.cat(canvas, dim=0)

    save_image(canvas * 0.5 + 0.5,
               'make_figures/output/reconstructions_bed_1.png',
               nrow=4,
               pad_value=1.0)

    canvas = make(paths[40:80])
    canvas = torch.cat(canvas, dim=0)

    save_image(canvas * 0.5 + 0.5,
               'make_figures/output/reconstructions_bed_2.png',
               nrow=4,
               pad_value=1.0)


if __name__ == "__main__":
    gpu_count = 1
    run(sample,
        get_cfg_defaults(),
        description='ALAE-reconstruction-bedroom',
        default_config='configs/bedroom.yaml',
        world_size=gpu_count,
        write_log=False)
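The canvas * 0.5 + 0.5 above rescales tanh-range output from [-1, 1] to the [0, 1] range save_image expects. The same effect can be achieved by letting torchvision do the rescaling, as Example #6 does; a sketch (recent torchvision spells the keyword value_range, older releases range):

from torchvision.utils import save_image

save_image(canvas,
           'make_figures/output/reconstructions_bed_1.png',
           nrow=4,
           pad_value=1.0,
           normalize=True,
           value_range=(-1, 1))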
Example #6
                                      langevin_steps=cfg.LANGEVIN.STEP,
                                      lr=cfg.LANGEVIN.LR)

            with torch.no_grad():
                latents = torch.cat((source_latent[:nrow], latents))
                latents = latents.unsqueeze(1).repeat(1,
                                                      ae.mapping_fl.num_layers,
                                                      1)

                out = decode(ae, latents, cfg)

                out = torch.cat((source_img[:nrow], out), dim=0)
                utils.save_image(
                    out,
                    f"{run_dir}/{str(iterations).zfill(6)}.png",
                    nrow=nrow,
                    normalize=True,
                    padding=0,
                    range=(-1, 1),
                )


if __name__ == "__main__":
    gpu_count = 1
    run(train,
        get_cfg_defaults(),
        description='Image-Translation',
        default_config='configs/celeba-hq.yaml',
        world_size=gpu_count,
        write_log=False)
Example #7
    model.eval()

    layer_count = cfg.MODEL.LAYER_COUNT

    decoder = nn.DataParallel(decoder)

    im_size = 2**(cfg.MODEL.LAYER_COUNT + 1)
    with torch.no_grad():
        draw_uncurated_result_figure(cfg,
                                     'make_figures/output/%s/generations.jpg' %
                                     cfg.NAME,
                                     model,
                                     cx=0,
                                     cy=0,
                                     cw=im_size,
                                     ch=im_size,
                                     rows=6,
                                     lods=[0, 0, 0, 1, 1, 2],
                                     seed=5)


if __name__ == "__main__":
    gpu_count = 1
    run(sample,
        get_cfg_defaults(),
        description='ALAE-generations',
        default_config='configs/ffhq.yaml',
        world_size=gpu_count,
        write_log=False)
Example #8
        'dlatent_avg': dlatent_avg
    }

    checkpointer = Checkpointer(cfg, model_dict, {}, logger=logger, save=False)

    checkpointer.load()

    model.eval()

    layer_count = cfg.MODEL.LAYER_COUNT

    logger.info("Generating...")

    decoder = nn.DataParallel(decoder)
    mapping_fl = nn.DataParallel(mapping_fl)

    with torch.no_grad():
        gen = ImageGenerator(cfg, num_samples=60000, minibatch_gpu=8)
        gen.evaluate(logger, mapping_fl, decoder,
                     cfg.DATASET.MAX_RESOLUTION_LEVEL - 2)


if __name__ == "__main__":
    gpu_count = 1
    run(sample,
        get_cfg_defaults(),
        description='ALAE-generate-images-for-attribute-classifications',
        default_config='configs/ffhq.yaml',
        world_size=gpu_count,
        write_log=False)
Example #9
File: fid.py Project: vii33/ALAE
    extra_checkpoint_data = checkpointer.load()
    last_epoch = list(extra_checkpoint_data['auxiliary']
                      ['scheduler'].values())[0]['last_epoch']
    logger.info("Model trained for %d epochs" % last_epoch)

    model.eval()

    layer_count = cfg.MODEL.LAYER_COUNT

    logger.info("Evaluating FID metric")

    model.decoder = nn.DataParallel(decoder)

    with torch.no_grad():
        ppl = FID(cfg,
                  num_images=50000,
                  minibatch_size=16 * torch.cuda.device_count())
        ppl.evaluate(logger, mapping_fl, model.decoder, model,
                     cfg.DATASET.MAX_RESOLUTION_LEVEL - 2)


if __name__ == "__main__":
    gpu_count = 1
    run(sample,
        get_cfg_defaults(),
        description='ALAE-fid',
        default_config='configs/ffhq.yaml',
        world_size=gpu_count,
        write_log="metrics/fid_score.txt")
Example #10
def launchEmulator(self):
    path = r'\Symbian\9.1\S60_3rd\Epoc32\release\winscw\udeb\bluewhaleplatform.exe'
    print("launch", path)
    child = launcher.run(path)
    return child
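launcher.run here returns a handle the caller can wait on, which matches Example #1 polling current_process.poll(). A hypothetical minimal stand-in, assuming it is a thin subprocess wrapper (this is not the project's actual implementation):

import subprocess

def run(path):
    # Hypothetical stand-in: start the emulator binary and return the
    # Popen object so callers can poll() or wait() on it.
    return subprocess.Popen([path])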
Example #11
def launchEmulator(self):
    path = r'\Symbian\9.1\S60_3rd\Epoc32\release\winscw\udeb\bluewhaleplatform.exe'
    print("launch", path)
    child = launcher.run(path)
    return child
Example #12
    model.eval()

    im_size = 2**(cfg.MODEL.LAYER_COUNT + 1)
    seed = np.random.randint(0, 999999)
    print("seed:", seed)
    with torch.no_grad():
        path = './make_figures/output'
        os.makedirs(path, exist_ok=True)
        os.makedirs(os.path.join(path, cfg.NAME), exist_ok=True)
        draw_uncurated_result_figure(
            cfg,
            './make_figures/output/%s/generations.jpg' % cfg.NAME,
            model,
            cx=0,
            cy=0,
            cw=im_size,
            ch=im_size,
            rows=6,
            lods=[0, 0, 0, 1, 1, 2],
            seed=seed)


if __name__ == "__main__":
    gpu_count = 1
    run(sample,
        get_cfg_defaults(),
        description='SoftIntroVAE-generations',
        default_config='./configs/ffhq256.yaml',
        world_size=gpu_count,
        write_log=False)
Example #13
                                 requires_grad=True).cuda() / 127.5 - 1.
                if x.shape[0] == 4:
                    x = x[:3]

                while x.shape[2] != model.decoder.layer_to_resolution[6]:
                    x = F.avg_pool2d(x, 2, 2)

                latents = encode(x[None, ...].cuda())
                f = decode(latents)
                r = torch.cat([x[None, ...].detach().cpu(),
                               f.detach().cpu()],
                              dim=3)
                os.makedirs('make_figures/output/pioneer/', exist_ok=True)
                save_image(f.detach().cpu() * 0.5 + 0.5,
                           'make_figures/output/pioneer/%s_alae.png' %
                           filename[:-9],
                           nrow=1,
                           pad_value=1.0)

    make(paths)


if __name__ == "__main__":
    gpu_count = 1
    run(sample,
        get_cfg_defaults(),
        description='ALAE-reconstructions-celeb-hq256-on-pioneer-examples',
        default_config='configs/celeba-hq256.yaml',
        world_size=gpu_count,
        write_log=False)
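The while loop in Example #13 halves the input with 2x2 average pooling until it matches the decoder's working resolution. The same step in isolation, as a hedged helper (assumes NCHW tensors whose spatial size is a power-of-two multiple of the target):

import torch.nn.functional as F

def downscale_to(x, target):
    # Halve H and W until the spatial size equals `target`.
    while x.shape[-1] != target:
        x = F.avg_pool2d(x, 2, 2)
    return x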
Example #14
            lod2batch.step()
            # if lod2batch.is_time_to_save():
            #     checkpointer.save("model_tmp_intermediate_lod%d" % lod_for_saving_model)
            if lod2batch.is_time_to_report():
                save_sample(lod2batch, tracker, sample, samplez, x, logger, model_s, cfg, encoder_optimizer,
                            generator_optimizer, output_folder)

        scheduler.step()

        if epoch % 20 == 0:
            save(epoch)

        save_sample(lod2batch, tracker, sample, samplez, x, logger, model_s, cfg, encoder_optimizer, generator_optimizer, output_folder)

    logger.info("Training finish!... save training results")
    if epoch is not None:
        save(epoch)

    best_model_name, best_model_score = scores_list[0]
    for model_name, model_score in scores_list:
        if model_score >= best_model_score:
            best_model_name, best_model_score = model_name, model_score

    checkpointer.tag_best_checkpoint(best_model_name)


if __name__ == "__main__":
    gpu_count = torch.cuda.device_count()
    run(train, get_cfg_defaults(), description='', default_config='configs/mnist_os.yaml',
        world_size=gpu_count)
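The best-model scan above keeps the highest score (with >=, later ties win); up to tie-breaking it is a plain max over the (name, score) pairs:

best_model_name, best_model_score = max(scores_list, key=lambda pair: pair[1])
checkpointer.tag_best_checkpoint(best_model_name)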
Example #15
            lod_for_saving_model = lod2batch.lod
            lod2batch.step()
            if local_rank == 0:
                if lod2batch.is_time_to_save():
                    checkpointer.save("model_tmp_intermediate_lod%d" %
                                      lod_for_saving_model)
                if lod2batch.is_time_to_report():
                    save_sample(lod2batch, tracker, sample, samplez, x, logger,
                                model_s, cfg, encoder_optimizer,
                                decoder_optimizer)

        scheduler.step()

        if local_rank == 0:
            checkpointer.save("model_tmp_lod%d" % lod_for_saving_model)
            save_sample(lod2batch, tracker, sample, samplez, x, logger,
                        model_s, cfg, encoder_optimizer, decoder_optimizer)

    logger.info("Training finish!... save training results")
    if local_rank == 0:
        checkpointer.save("model_final").wait()


if __name__ == "__main__":
    gpu_count = torch.cuda.device_count()
    run(train,
        get_cfg_defaults(),
        description='StyleGAN',
        default_config='configs/ffhq.yaml',
        world_size=gpu_count)
Example #16
    for i in range(src_len):
        save_image(src_originals[i] * 0.5 + 0.5, 'style_mixing/output/%s/source_%d.png' % (cfg.NAME, i))
        place(canvas, src_originals[i], 1 + i, 0)

    for i in range(dst_len):
        save_image(dst_originals[i] * 0.5 + 0.5, 'style_mixing/output/%s/dst_coarse_%d.png' % (cfg.NAME, i))
        place(canvas, dst_originals[i], 0, 1 + i)

    style_ranges = [range(0, 4)] * 3 + [range(4, 8)] * 2 + [range(8, layer_count * 2)]

    def mix_styles(style_src, style_dst, r):
        style = style_dst.clone()
        style[:, r] = style_src[:, r]
        return style

    for row in range(dst_len):
        row_latents = torch.stack([dst_latents[row]] * src_len)
        style = mix_styles(src_latents, row_latents, style_ranges[row])
        rec = model.decoder(style, layer_count - 1, 1, noise=True)
        for j in range(rec.shape[0]):
            save_image(rec[j] * 0.5 + 0.5, 'style_mixing/output/%s/rec_coarse_%d_%d.png' % (cfg.NAME, row, j))
            place(canvas, rec[j], 1 + j, 1 + row)

    save_image(torch.Tensor(canvas), 'style_mixing/output/%s/stylemix.png' % cfg.NAME)


if __name__ == "__main__":
    gpu_count = 1
    run(main, get_cfg_defaults(), description='ALAE-style-mixing', default_config='configs/ffhq.yaml',
        world_size=gpu_count, write_log=False)
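style_ranges above gives the first three rows the coarse layers (0-4), the next two the middle layers (4-8), and the last row the fine layers, mirroring the StyleGAN style-mixing figure. A tiny self-contained check of mix_styles; the (batch, layers, dim) shape is illustrative only:

import torch

def mix_styles(style_src, style_dst, r):
    style = style_dst.clone()
    style[:, r] = style_src[:, r]
    return style

src = torch.zeros(2, 18, 512)
dst = torch.ones(2, 18, 512)
mixed = mix_styles(src, dst, range(0, 4))
assert mixed[:, :4].eq(0).all() and mixed[:, 4:].eq(1).all()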
Example #17
# Launcher
# http://blog.tvalacarta.info/plugin-xbmc/tvalacarta/
#------------------------------------------------------------

# Constants
__plugin__ = "tvalacarta"
__author__ = "tvalacarta"
__url__ = "http://blog.tvalacarta.info/plugin-xbmc/tvalacarta/"
__date__ = "1 Septiembre 2010"
__version__ = "2.6"

import os
import sys
import xbmc

xbmc.output("[default.py] tvalacarta init...")

# Set up the directories containing libraries
#librerias = xbmc.translatePath( os.path.join( os.getcwd(), 'resources', 'lib' ) )
#sys.path.append (librerias)
#librerias = xbmc.translatePath( os.path.join( os.getcwd(), 'channels' ) )
#sys.path.append (librerias)
#librerias = xbmc.translatePath( os.path.join( os.getcwd(), 'servers' ) )
#sys.path.append (librerias)
librerias = xbmc.translatePath(os.path.join(os.getcwd(), 'youtubeAPI'))
sys.path.append(librerias)

# Run the main program
import launcher
launcher.run()
Example #18
    extra_checkpoint_data = checkpointer.load()
    last_epoch = list(extra_checkpoint_data['auxiliary']
                      ['scheduler'].values())[0]['last_epoch']
    logger.info("Model trained for %d epochs" % last_epoch)

    model.eval()

    layer_count = cfg.MODEL.LAYER_COUNT

    logger.info("Evaluating LPIPS metric")

    decoder = nn.DataParallel(decoder)
    encoder = nn.DataParallel(encoder)

    with torch.no_grad():
        ppl = LPIPS(cfg,
                    num_images=10000,
                    minibatch_size=16 * torch.cuda.device_count())
        ppl.evaluate(logger, mapping_fl, decoder, encoder,
                     cfg.DATASET.MAX_RESOLUTION_LEVEL - 2)


if __name__ == "__main__":
    gpu_count = 1
    run(sample,
        get_cfg_defaults(),
        description='ALAE-lpips',
        default_config='configs/experiment_celeba.yaml',
        world_size=gpu_count,
        write_log="metrics/lpips_score.txt")
Example #19
    novelty_detector = (model_s, bin_edges, counts, gennorm_param)

    percentages = cfg.DATASET.PERCENTAGES
    # percentages = [50]

    results = {}
    for p in percentages:
        # plt.figure(num=None, figsize=(8, 6), dpi=180, facecolor='w', edgecolor='k')
        alpha, beta, threshold, _ = compute_threshold_coeffs(
            cfg, logger, valid_set, inliner_classes, p, novelty_detector)
        with open(
                os.path.join(output_folder,
                             'coeffs_percentage_%d.txt' % int(p)), 'w') as f:
            f.write("%f %f %f\n" % (alpha, beta, threshold))
        results[p] = test(cfg, logger, test_set, inliner_classes, p,
                          novelty_detector, alpha, beta, threshold,
                          output_folder)

    return results


if __name__ == "__main__":
    run(main,
        get_cfg_defaults(),
        description='',
        default_config='configs/mnist.yaml',
        world_size=1,
        folding_id=0,
        inliner_classes=[3])
Example #20
                canvas.append(r)
        return canvas

    def chunker_list(seq, n):
        return [seq[i * n:(i + 1) * n] for i in range((len(seq) + n - 1) // n)]

    paths = chunker_list(paths, 8 * 3)

    path = './make_figures/output'
    os.makedirs(path, exist_ok=True)
    os.makedirs(os.path.join(path, cfg.NAME), exist_ok=True)

    for i, chunk in enumerate(paths):
        canvas = make(chunk)
        canvas = torch.cat(canvas, dim=0)

        save_path = './make_figures/output/%s/reconstructions_%d.png' % (
            cfg.NAME, i)
        os.makedirs(os.path.dirname(save_path), exist_ok=True)
        save_image(canvas * 0.5 + 0.5, save_path, nrow=3, pad_value=1.0)


if __name__ == "__main__":
    gpu_count = 1
    run(sample,
        get_cfg_defaults(),
        description='SoftIntroVAE-figure-reconstructions-paged',
        default_config='./configs/ffhq256.yaml',
        world_size=gpu_count,
        write_log=False)
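chunker_list above pages a sequence into fixed-size chunks, the last possibly shorter, which is what produces the numbered reconstruction files. A quick illustration:

def chunker_list(seq, n):
    return [seq[i * n:(i + 1) * n] for i in range((len(seq) + n - 1) // n)]

assert chunker_list(list(range(7)), 3) == [[0, 1, 2], [3, 4, 5], [6]]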
Example #21
        save_image(src_originals[i] * 0.5 + 0.5, './style_mixing/output/%s/source_%d.png' % (cfg.NAME, i))
        place(canvas, src_originals[i], 1 + i, 0)

    for i in range(dst_len):
        save_image(dst_originals[i] * 0.5 + 0.5, './style_mixing/output/%s/dst_coarse_%d.png' % (cfg.NAME, i))
        place(canvas, dst_originals[i], 0, 1 + i)

    style_ranges = [range(0, 4)] * 3 + [range(4, 8)] * 2 + [range(8, layer_count * 2)]

    def mix_styles(style_src, style_dst, r):
        style = style_dst.clone()
        style[:, r] = style_src[:, r]
        return style

    for row in range(dst_len):
        row_latents = torch.stack([dst_latents[row]] * src_len)
        style = mix_styles(src_latents, row_latents, style_ranges[row])
        rec = model.decoder(style, layer_count - 1, 1, noise=True)
        for j in range(rec.shape[0]):
            save_image(rec[j] * 0.5 + 0.5, './style_mixing/output/%s/rec_coarse_%d_%d.png' % (cfg.NAME, row, j))
            place(canvas, rec[j], 1 + j, 1 + row)

    save_image(torch.Tensor(canvas), './style_mixing/output/%s/stylemix.png' % cfg.NAME)


if __name__ == "__main__":
    gpu_count = 1
    run(main, get_cfg_defaults(), description='SandwichStyleVAE-style-mixing',
        default_config='./configs/celeba-hq256-generate.yaml',
        world_size=gpu_count, write_log=False)
Example #22
                lod_for_saving_model = lod2batch.lod
                lod2batch.step()
                if local_rank == 0:
                    if lod2batch.is_time_to_save():
                        checkpointer.save("model_tmp_intermediate_lod%d" %
                                          lod_for_saving_model)
                    if lod2batch.is_time_to_report():
                        save_sample(lod2batch, tracker, sample, samplez, x,
                                    logger, model_s, cfg, encoder_optimizer,
                                    decoder_optimizer)

        scheduler.step()

        if local_rank == 0:
            checkpointer.save("model_tmp_lod%d" % lod_for_saving_model)
            save_sample(lod2batch, tracker, sample, samplez, x, logger,
                        model_s, cfg, encoder_optimizer, decoder_optimizer)

    logger.info("Training finish!... save training results")
    if local_rank == 0:
        checkpointer.save("model_final").wait()


if __name__ == "__main__":
    gpu_count = torch.cuda.device_count()
    run(train,
        get_cfg_defaults(),
        description='StyleGAN',
        default_config='configs/experiment_celeba_sep.yaml',
        world_size=gpu_count)
Example #23
                    save_sample(lod2batch, tracker, sample, samplez, x, logger,
                                model_s, cfg, encoder_optimizer,
                                decoder_optimizer)

        scheduler.step()
        mean_diff_kl = np.mean(diff_kls)
        print("mean diff kl: ", mean_diff_kl)

        if epoch > num_vae_epochs - 1:
            kls_real.append(np.mean(batch_kls_real))
            kls_fake.append(np.mean(batch_kls_fake))
            rec_errs.append(np.mean(batch_rec_errs))

        if local_rank == 0:
            checkpointer.save("model_tmp_lod%d" % lod_for_saving_model)
            save_sample(lod2batch, tracker, sample, samplez, x, logger,
                        model_s, cfg, encoder_optimizer, decoder_optimizer)

    logger.info("Training finish!... save training results")
    if local_rank == 0:
        checkpointer.save("model_final").wait()


if __name__ == "__main__":
    gpu_count = torch.cuda.device_count()
    run(train,
        get_cfg_defaults(),
        description='StyleSoftIntroVAE',
        default_config='./configs/ffhq256.yaml',
        world_size=gpu_count)