Example No. 1
0
def test_get_args():
    """Defaults check: only --port given; other options fall back sanely."""
    sys.argv = ['', '--port', '8000']
    parsed = get_args()
    assert parsed.port == 8000
    assert parsed.servers == []
    # Wallet must default to a strictly positive integer.
    assert isinstance(parsed.wallet, int) and parsed.wallet > 0
Example No. 2
0
    def args(self, request):
        """Pytest fixture: build the CLI argument namespace for one
        (dataset, algorithm) pair, yield it to the test, then remove any
        output/checkpoint directories the run created.

        request.param is (dset_name, alg).
        """
        dset_name = request.param[0]
        alg = request.param[1]
        # A redundant list([...]) wrapper was dropped here (same contents).
        sys_args = [
            '--name=tmp_test', '--alg={}'.format(alg),
            '--dset_dir=./data/test_dsets', '--dset_name={}'.format(dset_name),
            '--z_dim={}'.format(Z_DIM), '--batch_size={}'.format(BATCH_SIZE),
            '--all_iter={}'.format(ALL_ITER), '--evaluate_iter={}'.format(
                MAX_ITER * 2), '--ckpt_save_iter={}'.format(CKPT_SAVE_ITER),
            '--max_iter={}'.format(MAX_ITER),
            '--controlled_capacity_increase={}'.format('true'), '--loss_terms'
        ]
        sys_args.extend(VAE_LOSSES)

        # Encoder choice depends on the algorithm: AE uses ENCODERS[0],
        # IFCVAE needs both, everything else uses ENCODERS[1].
        encoder = (c.ENCODERS[1], )
        if alg == 'AE':
            encoder = (c.ENCODERS[0], )
        elif alg == 'IFCVAE':
            encoder = c.ENCODERS[1], c.ENCODERS[0]
        sys_args.append('--encoder')
        sys_args.extend(encoder)

        discriminator = (c.DISCRIMINATORS[0], )
        sys_args.append('--discriminator')
        sys_args.extend(discriminator)

        decoder = (c.DECODERS[0], )
        sys_args.append('--decoder')
        sys_args.extend(decoder)

        label_tiler = (c.TILERS[0], )
        sys_args.append('--label_tiler')
        sys_args.extend(label_tiler)

        # Conditional VAEs additionally need labels to condition on.
        if 'CVAE' in alg:
            if dset_name == c.DATASETS[1]:
                include_labels = '1', '2', '3'
            elif dset_name == 'celebA':
                include_labels = 'Wearing_Hat', 'Arched_Eyebrows'
            else:
                raise NotImplementedError
            sys_args.append('--include_labels')
            sys_args.extend(include_labels)

        args = get_args(sys_args)

        # BUG FIX: logging.info('sys_args', sys_args) treated the list as a
        # %-format argument for a message with no format specifier, so the
        # record failed to format and was dropped ("--- Logging error ---").
        # Use lazy %-style placeholders instead.
        logging.info('sys_args: %s', sys_args)
        logging.info('Testing %s:%s', dset_name, alg)
        yield args

        # clean up: delete output and ckpt files
        train_dir = os.path.join(args.train_output_dir, args.name)
        test_dir = os.path.join(args.test_output_dir, args.name)
        ckpt_dir = os.path.join(args.ckpt_dir, args.name)

        shutil.rmtree(train_dir, ignore_errors=True)
        shutil.rmtree(test_dir, ignore_errors=True)
        shutil.rmtree(ckpt_dir, ignore_errors=True)
Example No. 3
0
            # Update learning rates
            ########################
            self.g_lr_scheduler.step()
            self.d_lr_scheduler.step()


if __name__ == '__main__':
    from main import get_args
    from torchviz import make_dot
    from torch.autograd import Variable
    from arch.generators import ResnetGenerator

    # Dummy image batch: 1 sample, 3 channels, 256x256 pixels.
    inputs = torch.randn(1, 3, 256, 256)

    # Run the A->B generator of the CycleGAN on the dummy input and print
    # the module structure. (A stray `pass` statement was removed.)
    Gab = cycleGAN(get_args()).Gab
    y = Gab(Variable(inputs))
    print(Gab)

    # To render the computation graph instead, use e.g.:
    #   dot = make_dot(y, params=dict(Gab.named_parameters()))
    #   dot.format = 'jpg'
    #   dot.render('g')
Example No. 4
0
import os
import numpy as np
from main import get_args
from nns import linear_fit
from embed_cgk import random_seed, cgk_string, distance


# Distances at or above this value are filtered out of the index sets.
threshold = 1000

args, data_handler, data_file = get_args()
train_dist, query_dist = data_handler.train_dist, data_handler.query_dist
train_idx = np.where(train_dist < threshold)
query_idx = np.where(query_dist < threshold)

dis_dir = "cgk_dist/{}".format(args.dataset)
os.makedirs(dis_dir, exist_ok=True)

# BUG FIX: the original concatenated dis_dir + "name.npy" without a path
# separator (writing files outside the directory it just created) and
# tested for "train_idx.npy", a file that is never written — so the cached
# branch could never be taken. Join paths properly and test for the file
# that is actually saved.
train_dist_path = os.path.join(dis_dir, "train_dist_hm.npy")
query_dist_path = os.path.join(dis_dir, "query_dist_hm.npy")
if not os.path.isfile(train_dist_path):
    # Compute CGK-embedded Hamming distances and cache them on disk.
    h = random_seed(data_handler.M, data_handler.C)
    xq = cgk_string(h, data_handler.xq.sig, data_handler.M)
    xt = cgk_string(h, data_handler.xt.sig, data_handler.M)
    xb = cgk_string(h, data_handler.xb.sig, data_handler.M)

    train_dist_hm = distance(xt, xt)
    query_dist_hm = distance(xq, xb)

    np.save(train_dist_path, train_dist_hm)
    np.save(query_dist_path, query_dist_hm)
else:
    train_dist_hm = np.load(train_dist_path)
    query_dist_hm = np.load(query_dist_path)
Example No. 5
0
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import main, helper, os, dataload
from model import LostNet
import tensorflow as tf
import multi_bleu
from rank_metrics import mean_average_precision, NDCG, MRR

# Command-line options parsed once at import time and shared module-wide.
args = main.get_args()


def test(model, test_dataset, dictionary, sess):
    """Evaluate `model` on `test_dataset` in mini-batches under `sess`.

    NOTE(review): this definition continues beyond the visible excerpt;
    only the batching/unpacking prologue is documented here.
    """
    # Split the dataset indices into batches of args.batch_size.
    batches_idx = helper.get_batches_idx(len(test_dataset), args.batch_size)
    print('number of test batches = ', len(batches_idx))

    num_batches = len(batches_idx)
    predicts, targets = [], []
    # Running ranking-metric accumulators (MAP, MRR, NDCG@k).
    map, mrr, ndcg_1, ndcg_3, ndcg_5, ndcg_10 = 0, 0, 0, 0, 0, 0
    for batch_no in range(1, num_batches + 1):  #1,...,num_batches
        batch_idx = batches_idx[batch_no - 1]
        batch_data = [test_dataset.dataset[i] for i in batch_idx]

        # Convert one batch of data into the model's input format.
        (hist_query_input, hist_doc_input, session_num, hist_query_num,
         hist_query_len, hist_click_num, hist_doc_len, cur_query_input,
         cur_doc_input, cur_query_num, cur_query_len, cur_click_num,
         cur_doc_len, query, q_len, doc, d_len, y, next_q, next_q_len,
         maximum_iterations) = helper.batch_to_tensor(batch_data,
                                                      args.max_query_len,
                                                      args.max_doc_len)
Example No. 6
0
from main import make_painting, get_args

if __name__ == '__main__':
    # Start from the default arguments for the target image, then tune
    # the iteration budget and the physical canvas dimensions.
    painting_args = get_args(name="test.jpg")
    overrides = {
        'n_iss_iters': 100,
        'n_samples_per_iss_iter': 50,
        'nst_n_iterations': 150,
        'painting_size_x_mm': 120,
        'painting_size_y_mm': 120,
    }
    for attr, value in overrides.items():
        setattr(painting_args, attr, value)
    make_painting(args=painting_args)
Example No. 7
0
# Copyright(c) Eric Steinberger 2018

from src.config import BrushConfig
from src.learn_strokes.GA import GA
from main import get_args

# Configure the genetic algorithm that evolves the "test" stroke using
# brush B6 (no brush currently mounted).
ga_config = dict(
    args=get_args(name="StrokeGen"),
    brush_to_paint_with=BrushConfig.B6,
    brush_currently_on=BrushConfig.NOTHING_MOUNTED,
    stroke_name="test",
    n_different_strokes_per_generation=10,
    how_often_paint_each_stroke=7,
    start_stroke_length_mm=4,
    stroke_deepness_mm=1.6,
    build_fns=True,
)
ga = GA(**ga_config)

# Evolve indefinitely; the process is stopped externally.
while True:
    ga.next_generation()
Example No. 8
0
    # Sweep best_n1_channels; each setting gets its own CSV log and saved
    # model file. (Presumably the channel count of a first layer — TODO
    # confirm against the model definition.)
    n1_channel_vals = [1, 10, 20, 40, 60, 80, 100]
    for v in n1_channel_vals:
        args.best_n1_channels = v
        args.log_file = "logs/best-n1chan={}.csv".format(v)
        args.model_save = "models/best-n1chan={}.torch".format(v)
        train(args)

    # Same sweep pattern for best_n2_channels.
    n2_channel_vals = [1, 5, 10, 15, 20, 30, 40]
    for v in n2_channel_vals:
        args.best_n2_channels = v
        args.log_file = "logs/best-n2chan={}.csv".format(v)
        args.model_save = "models/best-n2chan={}.torch".format(v)
        train(args)

    # And for best_n3_channels (same candidate values as n2).
    n3_channel_vals = [1, 5, 10, 15, 20, 30, 40]
    for v in n3_channel_vals:
        args.best_n3_channels = v
        args.log_file = "logs/best-n3chan={}.csv".format(v)
        args.model_save = "models/best-n3chan={}.torch".format(v)
        train(args)


if __name__ == "__main__":
    ARGS = get_args()
    if ARGS.model == "simple-ff":
        param_sweep_ff(ARGS)
    elif ARGS.model == "simple-cnn":
        param_sweep_cnn(ARGS)
    elif ARGS.model == "best":
        param_sweep_best(ARGS)
Example No. 9
0
def test_setup_logging_creates_rotating_file_handler():
    """setup_logging() must install a RotatingFileHandler first."""
    args = main.get_args()
    logger = main.setup_logging(args)
    # isinstance instead of an exact `type(...) is` check: subclasses of
    # RotatingFileHandler also satisfy the contract being tested.
    assert isinstance(logger.handlers[0], logging.handlers.RotatingFileHandler)
Example No. 10
0
def test_get_server():
    """get_server() should yield a usable server for default arguments."""
    server = get_server(get_args())
    assert server is not None
Example No. 11
0
        y[k] = pickle.load(
            open(f'reduced/{opt.outname}_{method}_{n}_{k}_y.p', 'rb'))
    return X, y


def init_nets(gan_path='netG_80.pth', mi_path='netG_80_KL.pth'):
    """Load the generator checkpoints to visualize.

    Args:
        gan_path: checkpoint file for the plain GAN generator.
        mi_path: checkpoint file for the GAN + mutual-information generator.

    Returns:
        Dict mapping a display label to a loaded generator; 'Real' maps to
        None because real data needs no generator.

    The hard-coded filenames are now defaulted parameters ("input your own
    file name" in the original), so callers can override them. Relies on
    the module-level `opt` namespace.
    """
    netG = {}
    netG['GAN'] = load_G(gan_path, opt)
    netG['GAN + MI'] = load_G(mi_path, opt)
    netG['Real'] = None
    return netG


if __name__ == '__main__':
    parser = get_args()
    parser.add_argument('--real',
                        action='store_true',
                        default=False,
                        help='whether to use real')
    opt = parser.parse_args()
    print('visualization')
    opt = parser.parse_args()
    print(opt)

    data = util.DATA_LOADER(opt)
    netG = init_nets()

    print(data.unseenclasses)
    load = True
    if load:
Example No. 12
0

if __name__ == "__main__":
    lstm = False
    eval = False
    robust = False
    mode = "train" if not eval else "evaluate" if not robust else "eval_robustness"
    sys.argv.append(mode)

    t0 = time.time()

    run_type = "ealstm" if lstm is False else "lstm"
    run_type = "eval-" + run_type if eval else run_type
    run_type = "robust-" + run_type if robust else run_type
    divisions = get_status(all_divisions, run_type)
    config = main.get_args()
    for d in divisions:
        print(f"Stating run-type: {run_type} for division: {d}")
        config["physio_division"] = d
        if eval or robust:
            config["run_dir"] = get_run_dir(d, lstm)
        config["gauges_path"] = gauges_path
        config["db_path"] = db_path
        config["seed"] = seed
        config["mode"] = mode
        config["no_static"] = True if lstm else False
        if mode is "train":
            main.train(config)
        elif mode is "evaluate":
            main.evaluate(config)
        elif mode is "eval_robustness":