Code Example #1
def load_func():
    if config.model_path is not None:
        load_model(session, config.model_path)
        print("Continuing training from model: %s" % config.model_path)
    if config.embedding_path is not None:
        load_variables(session, os.path.join(config.embedding_path, "embedding.tf"),
                       [m.embedding_variable])
        print("Loading embedding vectors from: %s" % config.embedding_path)
Code Example #2
def load_data():
    datadir = Path(FLAGS.datadir)

    # Load training data for training the policy.
    dt = utils.load_variables(datadir / f'{FLAGS.env_name}.pkl')
    dt['states'] = dt['states'][:FLAGS.num_episodes_train, :]
    dt['actions'] = dt['actions'][:FLAGS.num_episodes_train, :]
    if 'Visual' in FLAGS.env_name:
        # Decode images from byte strings. NOTE: this might take a lot of
        # memory. You may want to start with a small number of episodes. One
        # way to reduce memory usage would be to convert them into PIL images
        # for each batch in the data loader instead.
        for e in range(dt['states'].shape[0]):
            for t in range(dt['states'].shape[1]):
                obs = Image.open(dt['states'][e, t])
                dt['states'][e, t] = obs
    return dt
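The comment in the loop above suggests a lazier alternative: keep the encoded PNG buffers as they are and decode only the frames needed for each minibatch. A minimal sketch of that idea, using a hypothetical batch_iterator helper that is not part of the project:

import numpy as np
from PIL import Image


def batch_iterator(states, actions, batch_size):
    # Hypothetical sketch: sample random (episode, step) pairs and decode the
    # corresponding PNG buffers on the fly, instead of decoding everything
    # upfront in load_data().
    num_episodes, num_steps = states.shape[:2]
    order = np.arange(num_episodes * num_steps)
    np.random.shuffle(order)
    for start in range(0, order.size, batch_size):
        idx = order[start:start + batch_size]
        e, t = np.unravel_index(idx, (num_episodes, num_steps))
        imgs = []
        for i, j in zip(e, t):
            buf = states[i, j]
            buf.seek(0)  # the buffer may have been read before
            imgs.append(np.asarray(Image.open(buf)))
        yield np.stack(imgs), actions[e, t]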
Code Example #3
def main(_):
    datadir = Path(FLAGS.datadir)
    in_file_name = datadir / (FLAGS.env_name + '.pkl')
    dt = utils.load_variables(str(in_file_name))
    all_states = dt['states']
    all_actions = dt['actions']

    envs = gym.make('Visual' + FLAGS.env_name)
    all_obss = [[None for j in range(all_states.shape[1])]
                for i in range(all_states.shape[0])]
    all_obss = np.array(all_obss)
    for i in tqdm(range(all_states.shape[0])):
        for j in tqdm(range(all_states.shape[1])):
            obs = envs.reset_to_state(all_states[i, j, :])
            obs_bytes = io.BytesIO()
            obs.save(obs_bytes, format='PNG')
            all_obss[i, j] = obs_bytes
    data_file_name = datadir / ('Visual' + FLAGS.env_name + '.pkl')
    utils.save_variables(str(data_file_name), [all_obss, all_actions],
                         ['states', 'actions'],
                         overwrite=True)
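The utils.save_variables / utils.load_variables pair used in the two examples above is not shown. Judging from how they are called (a .pkl path, parallel lists of values and names, and a dict returned on load), a pickle-backed sketch could look like the following; this is an assumption, not the actual implementation:

import os
import pickle


def save_variables(path, var_list, name_list, overwrite=False):
    # Hypothetical sketch: store a dict mapping names to values in a pickle file.
    if os.path.exists(path) and not overwrite:
        raise IOError('%s already exists.' % path)
    with open(path, 'wb') as f:
        pickle.dump(dict(zip(name_list, var_list)), f)


def load_variables(path):
    # Hypothetical sketch: read the pickled dict back.
    with open(path, 'rb') as f:
        return pickle.load(f)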
Code Example #4
File: main.py  Project: mromanelli9/bunkerized-nginx
        print("[!] Missing output directory : " + args.output)
        sys.exit(1)
    if not os.access(args.output, os.W_OK | os.X_OK):
        print("[!] Can't write to the templates directory : " + args.output)
        sys.exit(2)
    if not os.path.exists(args.variables):
        print("[!] Missing variables file : " + args.variables)
        sys.exit(1)
    if not os.access(args.variables, os.R_OK):
        print("[!] Can't read variables file : " + args.variables)
        sys.exit(2)

    # Compute the final config
    configurator = Configurator()
    configurator.load_settings(args.settings)
    variables = utils.load_variables(args.variables)
    configurator.load_variables(variables)
    config = configurator.get_config()

    # Remove old files
    files = glob.glob(args.output + "/*")
    for file in files:
        if (file.endswith(".conf") or file.endswith(".env")
            ) and os.path.isfile(file) and not os.path.islink(file):
            os.remove(file)
        elif os.path.isdir(file) and not os.path.islink(file):
            shutil.rmtree(file, ignore_errors=False)

    # Generate the files from templates and config
    templator = Templator(config, args.templates, args.output, args.target)
    templator.render_global()
Code Example #5
    def __init__(self,
                 sess,
                 data_type='synthetic',
                 batch_size=10,
                 Xtrain=None,
                 Ytrain=None,
                 Xtest=None,
                 Ytest=None,
                 Ytest_und=None,
                 Sig=None,
                 M0=None,
                 Params=None):
        # Initialize the object
        self.sess = sess
        # Input shape
        if data_type == 'synthetic':
            self.height_MR = 70
            self.width_MR = 70
            self.channels = 10  # Number of PLDs or time-points
        else:
            self.height_MR = 40
            self.width_MR = 40
            self.channels = 7

        self.output_dim = 2  # number of ASL parameters to estimate (CBF, ATT)
        self.data_type = data_type

        # Train Data
        self.Xtrain = Xtrain
        self.Ytrain = Ytrain

        # Test Data
        self.Xtest = Xtest
        self.Ytest = Ytest
        self.Ytest_und = Ytest_und

        # Number of residual blocks in the generator
        self.n_residual_blocks = 8

        # Depth of dual paths in the generator
        self.depth_dual = 4

        # Weight decay
        self.wd = 0.0005
        self.df = 64

        # Batch normalization flag
        self.bn_g = False  # For generator
        self.bn_d = True  # For discriminator

        # Lambda value for loss terms
        self.lam_lp = 1.0
        self.lam_asl = 3.0  # 3.0 was found to be the optimal value
        self.lam_adv = 0.5

        print(
            "[ LP loss coef: %f, ASL loss coef: %f, Adversarial loss coef: %f ]"
            % (self.lam_lp, self.lam_asl, self.lam_adv))

        # Learning rate
        self.learning_rate = 1e-3
        self.batch_size = batch_size

        # Norm of reconstruction term
        self.l_num = 1

        # Get the generic ASL parameters
        self.common_vars = ut.load_variables(Params)

        # Call the batch generation for every iteration of GAN
        self.data_generator = ut.generate_batches(Xtrain, Ytrain, Sig, M0,
                                                  self.batch_size)

        # Build GAN model
        self.build_model()
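The ut.generate_batches helper wired up in the constructor is not shown. A minimal sketch of what such a generator could look like, assuming Xtrain, Ytrain, Sig and M0 are arrays aligned along the first (sample) axis and the GAN training loop draws one minibatch per iteration (names and behavior are assumptions):

import numpy as np


def generate_batches(X, Y, Sig, M0, batch_size):
    # Hypothetical sketch: endlessly yield random minibatches so the training
    # loop can call next() once per GAN iteration.
    n = X.shape[0]
    while True:
        idx = np.random.choice(n, size=batch_size, replace=False)
        yield X[idx], Y[idx], Sig[idx], M0[idx]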