#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tree Tensor Network for the groundstate of the Transverse Ising chain."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
tf.enable_v2_behavior()

from experiments.tree_tensor_network import ttn_1d_uniform


if __name__ == "__main__":
    num_layers = 6
    max_bond_dim = 16
    dtype = tf.complex128
    build_graphs = True

    num_sweeps = 1000

    Ds = [min(2**i, max_bond_dim) for i in range(1, num_layers + 1)]

    print("----------------------------------------------------")
Example #2
def main(_):
    tf.enable_v2_behavior()
    output_path = FLAGS.converted_checkpoint_path
    v1_checkpoint = FLAGS.checkpoint_to_convert
    bert_config = configs.BertConfig.from_json_file(FLAGS.bert_config_file)
    convert_checkpoint(bert_config, output_path, v1_checkpoint)
Example #3
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
functions for binary MERA optimization
"""
import tensorflow as tf
import numpy as np
import tensornetwork as tn
import experiments.MERA.binary_mera_lib as bml
import experiments.MERA.binary_mera as bm
import experiments.MERA.misc_mera as misc_mera
import pytest
import copy
tf.enable_v2_behavior()

@pytest.mark.parametrize("chi", [4, 6])
@pytest.mark.parametrize("dtype", [tf.float64, tf.complex128])
def test_ascending_descending(chi, dtype):
    """
    test if ascending and descending operations are doing the right thing
    """
    wC, uC, rho_0 = bml.initialize_binary_MERA_random(phys_dim=2, chi=chi, dtype=dtype)
    wC, uC = bml.unlock_layer(wC, uC)  # add a transitional layer
    wC, uC = bml.unlock_layer(wC, uC)  # add a transitional layer
    ham_0 = bml.initialize_TFI_hams(dtype)
    rho = [0 for n in range(len(wC) + 1)]
    ham = [0 for n in range(len(wC) + 1)]
    rho[-1] = bml.steady_state_density_matrix(10, rho_0, wC[-1], uC[-1])
    ham[0] = ham_0
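
The test above is truncated before the actual consistency check. What it relies on is that ascending a Hamiltonian and descending a density matrix are adjoint superoperators, so an expectation value can be evaluated on either level. A self-contained illustration of that property with a plain isometry standing in for the binary-MERA tensors (a sketch of the idea only, not the binary_mera_lib implementation):

import numpy as np

np.random.seed(0)
chi_fine, chi_coarse = 8, 4
# Random isometry w with w @ w.conj().T = identity on the coarse space.
q, _ = np.linalg.qr(np.random.randn(chi_fine, chi_coarse))
w = q.conj().T                       # shape (chi_coarse, chi_fine)

ham = np.random.randn(chi_fine, chi_fine)
ham = ham + ham.T                    # Hermitian "Hamiltonian" on the fine level
rho = np.random.randn(chi_coarse, chi_coarse)
rho = rho @ rho.T                    # positive "density matrix" on the coarse level
rho /= np.trace(rho)

ham_up = w @ ham @ w.conj().T        # ascended Hamiltonian
rho_down = w.conj().T @ rho @ w      # descended density matrix

# Both ways of computing the energy agree, by cyclicity of the trace.
assert np.allclose(np.trace(rho @ ham_up), np.trace(rho_down @ ham))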
Example #4
# ==============================================================================
"""Test file to display the error message and verify it with FileCheck."""

# RUN: %p/saved_model_error | FileCheck %s

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import sys
from absl import app

from tensorflow import enable_v2_behavior
import tensorflow.compat.v2 as tf

enable_v2_behavior()


class TestModule(tf.Module):
    """The test model has supported op."""
    @tf.function(
        input_signature=[tf.TensorSpec(shape=[3, 3], dtype=tf.float32)])
    def model(self, x):
        y = tf.math.reciprocal(x)  # Not supported
        return y + y


class TestGraphDebugInfo(object):
    """Test stack trace can be displayed."""
    def testSavedModelDebugInfo(self):
        """Save a saved model with unsupported ops, and then load and convert it."""
Example #5
def main(argv):
    del argv  # Unused
    if hasattr(tf, "enable_v2_behavior"):
        tf.enable_v2_behavior()
    tf.test.main()
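
This example guards the call with hasattr because tf.enable_v2_behavior exists at the top level only in TF 1.14/1.15. A small sketch (not from the snippet) of the same guard extended with the compat fallback available in TF 2.x:

import tensorflow as tf

def enable_v2_if_available():
    # TF 1.14/1.15 expose enable_v2_behavior at the top level; in TF 2.x it
    # lives only under tf.compat.v1, where calling it is harmless.
    if hasattr(tf, "enable_v2_behavior"):
        tf.enable_v2_behavior()
    elif hasattr(tf.compat.v1, "enable_v2_behavior"):
        tf.compat.v1.enable_v2_behavior()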
Example #6
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 12 11:49:04 2020

@author: nevena
"""
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

import numpy as np
import tensorflow as tf
import pickle
tf.enable_v2_behavior()  # for those like me who haven't yet installed TF2

#tf.config.optimizer.set_jit(True)

#%%
"""parameters"""
n = int(10000)
#layer dim:
s = 10
p = 50
m = 100
q = 100
r = 100
ni = 0.001  #optimizer Adam
pu = 0.5  #initial condition percentage of points
stdev = 0.1
mean = 0
#%%
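
The listing cuts off after the parameter cell. As an illustration only (the script's actual network is not shown in this excerpt, so the architecture below is an assumption), the layer sizes and the Adam step ni could be wired into a Keras model like this:

#%%
# Hypothetical use of the parameters above; the real model of the script is
# not part of this excerpt.
model = tf.keras.Sequential([
    tf.keras.layers.Dense(p, activation='tanh', input_shape=(s,)),
    tf.keras.layers.Dense(m, activation='tanh'),
    tf.keras.layers.Dense(q, activation='tanh'),
    tf.keras.layers.Dense(r, activation='tanh'),
    tf.keras.layers.Dense(1),
])
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=ni), loss='mse')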
Example #7
def main(_):
  if FLAGS.use_tf2:
    tf.enable_v2_behavior()
  config_content = {
      'action_type': FLAGS.action_type,
      'obs_type': FLAGS.obs_type,
      'reward_shape_val': FLAGS.reward_shape_val,
      'use_subset_instruction': FLAGS.use_subset_instruction,
      'frame_skip': FLAGS.frame_skip,
      'use_polar': FLAGS.use_polar,
      'suppress': FLAGS.suppress,
      'diverse_scene_content': FLAGS.diverse_scene_content,
      'buffer_size': FLAGS.buffer_size,
      'use_movement_bonus': FLAGS.use_movement_bonus,
      'reward_scale': FLAGS.reward_scale,
      'scenario_type': FLAGS.scenario_type,
      'img_resolution': FLAGS.img_resolution,
      'render_resolution': FLAGS.render_resolution,

      # agent
      'agent_type': FLAGS.agent_type,
      'masking_q': FLAGS.masking_q,
      'discount': FLAGS.discount,
      'instruction_repr': FLAGS.instruction_repr,
      'encoder_type': FLAGS.encoder_type,
      'learning_rate': FLAGS.learning_rate,
      'polyak_rate': FLAGS.polyak_rate,
      'trainable_encoder': FLAGS.trainable_encoder,
      'embedding_type': FLAGS.embedding_type,

      # learner
      'num_episode': FLAGS.num_episode,
      'optimization_steps': FLAGS.optimization_steps,
      'batchsize': FLAGS.batchsize,
      'sample_new_scene_prob': FLAGS.sample_new_scene_prob,
      'max_episode_length': FLAGS.max_episode_length,
      'record_atomic_instruction': FLAGS.record_atomic_instruction,
      'paraphrase': FLAGS.paraphrase,
      'relabeling': FLAGS.relabeling,
      'k_immediate': FLAGS.k_immediate,
      'future_k': FLAGS.future_k,
      'negate_unary': FLAGS.negate_unary,
      'min_epsilon': FLAGS.min_epsilon,
      'epsilon_decay': FLAGS.epsilon_decay,
      'collect_cycle': FLAGS.collect_cycle,
      'use_synonym_for_rollout': FLAGS.use_synonym_for_rollout,
      'reset_mode': FLAGS.reset_mode,
      'maxent_irl': FLAGS.maxent_irl,

      # relabeler
      'sampling_temperature': FLAGS.sampling_temperature,
      'generated_label_num': FLAGS.generated_label_num,
      'use_labeler_as_reward': FLAGS.use_labeler_as_reward,
      'use_oracle_instruction': FLAGS.use_oracle_instruction
  }

  if FLAGS.maxent_irl:
    assert FLAGS.batchsize % FLAGS.irl_parallel_n == 0
    config_content['irl_parallel_n'] = FLAGS.irl_parallel_n
    config_content['irl_sample_goal_n'] = FLAGS.irl_sample_goal_n
    config_content['relabel_proportion'] = FLAGS.relabel_proportion
    config_content['entropy_alpha'] = FLAGS.entropy_alpha

  cfg = Config(config_content)

  if FLAGS.experiment_confg:
    cfg.update(get_exp_config(FLAGS.experiment_confg))

  save_home = FLAGS.save_dir if FLAGS.save_dir else tf.test.get_temp_dir()
  if FLAGS.varying:
    exp_name = 'exp-'
    for varied_var in FLAGS.varying.split(','):
      exp_name += str(varied_var) + '=' + str(FLAGS[varied_var].value) + '-'
  else:
    exp_name = 'SingleExperiment'
  save_dir = os.path.join(save_home, exp_name)
  try:
    gfile.MkDir(save_home)
  except gfile.Error as e:
    print(e)
  try:
    gfile.MkDir(save_dir)
  except gfile.Error as e:
    print(e)

  cfg.update(Config({'model_dir': save_dir}))

  print('############################################################')
  print(cfg)
  print('############################################################')

  env, learner, replay_buffer, agent, extra_components = experiment_setup(
      cfg, FLAGS.use_tf2, FLAGS.use_nn_relabeling)
  agent.init_networks()

  if FLAGS.use_tf2:
    logger = Logger2(save_dir)
  else:
    logger = Logger(save_dir)

  with gfile.GFile(os.path.join(save_dir, 'config.json'), mode='w+') as f:
    json.dump(cfg.as_dict(), f, sort_keys=True, indent=4)

  if FLAGS.save_model and tf.train.latest_checkpoint(save_dir):
    print('Loading saved weights from {}'.format(save_dir))
    agent.load_model(save_dir)

  if FLAGS.save_model:
    video_dir = os.path.join(save_dir, 'rollout_cycle_{}.mp4'.format('init'))
    print('Saving video to {}'.format(video_dir))
    learner.rollout(
        env,
        agent,
        video_dir,
        num_episode=FLAGS.rollout_episode,
        record_trajectory=FLAGS.record_trajectory)

  success_rate_ema = -1.0

  # Training loop
  for epoch in range(FLAGS.num_epoch):
    for cycle in range(FLAGS.num_cycle):
      stats = learner.learn(env, agent, replay_buffer)

      if success_rate_ema < 0:
        success_rate_ema = stats['achieved_goal']

      loss_dropped = stats['achieved_goal'] < 0.1 * success_rate_ema
      far_along_training = stats['global_step'] > 100000
      if FLAGS.save_model and loss_dropped and far_along_training:
        print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
        print('Step {}: Loading models due to sudden loss drop D:'.format(
            stats['global_step']))
        print('Dropped from {} to {}'.format(success_rate_ema,
                                             stats['achieved_goal']))
        agent.load_model(save_dir)
        print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
        continue
      success_rate_ema = 0.95 * success_rate_ema + 0.05 * stats['achieved_goal']

      at_save_interval = stats['global_step'] % FLAGS.save_interval == 0
      better_reward = stats['achieved_goal'] > success_rate_ema
      if FLAGS.save_model and at_save_interval and better_reward:
        print('Saving model to {}'.format(save_dir))
        agent.save_model(save_dir)

      if FLAGS.save_model and stats['global_step'] % FLAGS.video_interval == 0:
        video_dir = os.path.join(save_dir, 'rollout_cycle_{}.mp4'.format(cycle))
        print('Saving video to {}'.format(video_dir))
        test_success_rate = learner.rollout(
            env,
            agent,
            video_dir,
            record_video=FLAGS.save_video,
            num_episode=FLAGS.rollout_episode,
            record_trajectory=FLAGS.record_trajectory)
        stats['Test Success Rate'] = test_success_rate
        print('Test Success Rate: {}'.format(test_success_rate))

      stats['ema success rate'] = success_rate_ema
      logger.log(epoch, cycle, stats)
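
The reload-on-drop guard in this loop compares each cycle's success rate against an exponential moving average (decay 0.95) that is seeded with the first observation; on a sudden collapse the last checkpoint is reloaded and the EMA update is skipped. An isolated sketch of that bookkeeping (the global_step > 100000 condition is omitted here):

ema = -1.0
for rate in [0.5, 0.55, 0.6, 0.02]:
    if ema >= 0 and rate < 0.1 * ema:
        # Collapse below 10% of the running average: the training loop above
        # reloads the model and `continue`s without updating the EMA.
        print('sudden drop at', rate, '- EMA stays at', round(ema, 4))
        continue
    ema = rate if ema < 0 else 0.95 * ema + 0.05 * rate
    print(rate, '->', round(ema, 4))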
Example #8
def main(argv):
    if len(argv) > 1:
        raise app.UsageError('Too many command-line arguments.')

    tf.enable_v2_behavior()
    # TODO(b/139129100): Remove this once the local executor is the default.
    tff.framework.set_default_executor(
        tff.framework.local_executor_factory(max_fanout=25))

    stackoverflow_train, stackoverflow_validation, stackoverflow_test = dataset.get_stackoverflow_datasets(
        vocab_tokens_size=FLAGS.vocab_tokens_size,
        vocab_tags_size=FLAGS.vocab_tags_size,
        client_batch_size=FLAGS.client_batch_size,
        client_epochs_per_round=FLAGS.client_epochs_per_round,
        max_training_elements_per_user=FLAGS.max_elements_per_user,
        num_validation_examples=FLAGS.num_validation_examples)

    sample_client_dataset = stackoverflow_train.create_tf_dataset_for_client(
        stackoverflow_train.client_ids[0])
    # TODO(b/144382142): Sample batches cannot be eager tensors, since they are
    # passed (implicitly) to tff.learning.build_federated_averaging_process.
    sample_batch = tf.nest.map_structure(lambda x: x.numpy(),
                                         next(iter(sample_client_dataset)))

    model_builder = functools.partial(
        models.create_logistic_model,
        vocab_tokens_size=FLAGS.vocab_tokens_size,
        vocab_tags_size=FLAGS.vocab_tags_size)

    loss_builder = functools.partial(tf.keras.losses.BinaryCrossentropy,
                                     from_logits=False,
                                     reduction=tf.keras.losses.Reduction.SUM)

    training_process = iterative_process_builder.from_flags(
        dummy_batch=sample_batch,
        model_builder=model_builder,
        loss_builder=loss_builder,
        metrics_builder=metrics_builder)

    client_datasets_fn = training_utils.build_client_datasets_fn(
        stackoverflow_train, FLAGS.clients_per_round)

    evaluate_fn = training_utils.build_evaluate_fn(
        model_builder=model_builder,
        eval_dataset=stackoverflow_validation,
        loss_builder=loss_builder,
        metrics_builder=metrics_builder)

    test_fn = training_utils.build_evaluate_fn(
        model_builder=model_builder,
        # Use both val and test for symmetry with other experiments, which
        # evaluate on the entire test set.
        eval_dataset=stackoverflow_validation.concatenate(stackoverflow_test),
        loss_builder=loss_builder,
        metrics_builder=metrics_builder)

    logging.info('Training model:')
    logging.info(model_builder().summary())

    training_loop.run(training_process,
                      client_datasets_fn,
                      evaluate_fn,
                      test_fn=test_fn)
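
One detail worth isolating from this snippet is the sample_batch construction: the dummy batch handed to the iterative-process builder must be plain numpy rather than eager tensors, so one element of the client tf.data.Dataset is pulled eagerly and mapped through .numpy(). A self-contained toy version of that pattern (synthetic data, not the Stack Overflow dataset):

import tensorflow as tf

dataset = tf.data.Dataset.from_tensor_slices(
    {'tokens': tf.ones([8, 5], tf.int64), 'tags': tf.zeros([8, 3])}).batch(4)
sample_batch = tf.nest.map_structure(lambda x: x.numpy(),
                                     next(iter(dataset)))
print({k: v.shape for k, v in sample_batch.items()})  # {'tokens': (4, 5), 'tags': (4, 3)}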