Example #1
    def __init__(self, **kwargs):
        """
        For a full list of configuration options, see `finetune.config`.

        :param config: A config object generated by `finetune.config.get_config` or None (for default config).
        :param **kwargs: key-value pairs of config items to override.
        """
        weak_self = weakref.ref(self)

        def cleanup():
            strong_self = weak_self()
            if strong_self is not None:
                BaseModel.__del__(strong_self)

        atexit.register(cleanup)
        d = deepcopy(self.defaults)
        d.update(kwargs)
        self.config = get_config(**d)
        self.resolved_gpus = None
        self.validate_config()
        download_data_if_required(self.config.base_model)
        self.input_pipeline = self._get_input_pipeline()
        self._trained = False
        self._initialize()
        if self.config.debugging_logs:
            os.environ["TF_CPP_MIN_LOG_LEVEL"] = "0"
            tf_logging.set_verbosity(tf_logging.DEBUG)
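Note: the `weakref.ref` + `atexit` combination above registers an exit hook without keeping the instance alive (registering a bound method such as `atexit.register(self.__del__)` would hold a strong reference and prevent garbage collection). A minimal standalone sketch of the same pattern, using an illustrative `Resource` class that is not part of finetune:

import atexit
import weakref


class Resource:
    def __init__(self):
        # hold only a weak reference so the exit hook does not keep `self` alive
        weak_self = weakref.ref(self)

        def cleanup():
            strong_self = weak_self()  # None if the instance was already collected
            if strong_self is not None:
                strong_self.close()

        atexit.register(cleanup)

    def close(self):
        print("releasing resources")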
Example #2
    def __init__(self, **kwargs):
        """
        For a full list of configuration options, see `finetune.config`.

        :param config: A config object generated by `finetune.config.get_config` or None (for default config).
        :param **kwargs: key-value pairs of config items to override.
        """
        weak_self = weakref.ref(self)

        def cleanup():
            strong_self = weak_self()
            if strong_self is not None:
                BaseModel.__del__(strong_self)

        atexit.register(cleanup)

        self.config = get_config(**kwargs)
        if self.config.default_context is not None and not isinstance(self.config.default_context, dict):
            raise FinetuneError(
                "Invalid default given: Need a dictionary of auxiliary info fields and default values."
            )
        self.config.use_auxiliary_info = self.config.default_context is not None

        self.resolved_gpus = None
        self.validate_config()
        download_data_if_required(self.config.base_model)
        self.input_pipeline = self._get_input_pipeline()
        self._trained = False
        self._initialize()
        if self.config.debugging_logs:
            os.environ["TF_CPP_MIN_LOG_LEVEL"] = "0"
            tf_logging.set_verbosity(tf_logging.DEBUG)
Example #3
def navlin(initial_state, goal, beta, horizon, debug, verbose):
    """Generate and solve the linear navigation LQR problem.

    Args:

        initial_state: list of floats.

        goal: list of floats.
    """

    if verbose:
        tf_logging.set_verbosity(tf_logging.INFO)

    if debug:
        tf_logging.set_verbosity(tf_logging.DEBUG)

    initial_state = list(map(float, initial_state.split()))
    x0 = np.array(initial_state, dtype=np.float32)[:, np.newaxis]

    goal = list(map(float, goal.split()))
    g = np.array(goal, dtype=np.float32)[:, np.newaxis]

    solver = envs.make_lqr_linear_navigation(g, beta)
    trajectory = solver.solve(x0, horizon)

    print(repr(trajectory))
    print()
    print(str(trajectory))
Example #4
def tensorflow_shutup(verbose=False):
    """
    Make Tensorflow less verbose
    """
    try:
        # noinspection PyPackageRequirements
        if not verbose:
            import os
            from tensorflow.compat.v1 import logging
            logging.set_verbosity(logging.ERROR)
            os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

        # Monkey-patch the deprecation utils to silence them; it may be worth
        # disabling this again after upgrading TensorFlow.
        # noinspection PyUnusedLocal
        def deprecated(date, instructions, warn_once=True):
            def deprecated_wrapper(func):
                return func

            return deprecated_wrapper

        from tensorflow.python.util import deprecation
        deprecation.deprecated = deprecated

    except ImportError:
        pass
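Note: `TF_CPP_MIN_LOG_LEVEL` is read by TensorFlow's C++ backend, so it is most reliable to set it before TensorFlow is first imported; setting it afterwards (as `tensorflow_shutup` does) may leave some early startup messages visible. A minimal sketch of the safer ordering:

import os

# set the env var before the first TensorFlow import so the C++ backend sees it
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import tensorflow as tf
from tensorflow.compat.v1 import logging

logging.set_verbosity(logging.ERROR)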
Example #5
def silence_tensorflow():
    """To silence the warning and error messages from Tensorflow.

    The end users should not be exposed to the huge amount of confusing messages,
    but developers may need to. Therefore this function is defined here for CLI
    only, and not in omnizart API files.
    """
    # pylint: disable=E0401,C0415
    from tensorflow.compat.v1 import logging as tf_logger
    tf_logger.set_verbosity(tf_logger.ERROR)
Example #6
def main():
    """Ejecuta la primera parte de la práctica 2 paso a paso."""

    # No mostrar warnings de TensorFlow
    logging.set_verbosity(logging.ERROR)

    print("\n--- EJERCICIO 1: BASENET ---\n")
    ex1()

    print("\n--- EJERCICIO 2: BASENET MEJORADO ---\n")
    ex2()
Example #7
def main():
    """Ejecuta la segunda parte de la práctica 2 paso a paso."""

    # No mostrar warnings de TensorFlow
    logging.set_verbosity(logging.ERROR)

    print("\n--- EJERCICIO 1: EXTRACTOR DE CARACTERÍSTICAS ---\n")
    resnet_feature_extraction()

    print("\n--- EJERCICIO 2: FINE-TUNING ---\n")
    resnet_fine_tuning()
Example #8
def pick_examples_from_tfrecord(filename, sampling_frac=0.02):
    # tf_record_iterator is deprecated. Silence those warnings for now.
    # TODO(tommadams): remove this once
    # https://github.com/tensorflow/minigo/issues/740 is fixed.
    v = tf_logging.get_verbosity()
    tf_logging.set_verbosity(tf_logging.ERROR)
    protos = list(tf.python_io.tf_record_iterator(filename, READ_OPTS))
    tf_logging.set_verbosity(v)

    number_samples = np.random.poisson(len(protos) * sampling_frac)
    choices = random.sample(protos, min(len(protos), number_samples))
    return choices
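The save/set/restore dance in `pick_examples_from_tfrecord` is common enough to wrap in a context manager. A minimal sketch, assuming the same `tensorflow.compat.v1.logging` module used throughout these examples; the name `tf_verbosity` is illustrative, not part of any library:

import contextlib

import tensorflow.compat.v1.logging as tf_logging


@contextlib.contextmanager
def tf_verbosity(level):
    """Temporarily set the TF logging verbosity, restoring the old level on exit."""
    previous = tf_logging.get_verbosity()
    tf_logging.set_verbosity(level)
    try:
        yield
    finally:
        tf_logging.set_verbosity(previous)

With this, the body of the example reduces to `with tf_verbosity(tf_logging.ERROR): protos = list(...)`, and the previous verbosity is restored even if reading the file raises.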
Example #9
    def __init__(self, **kwargs):
        import tensorflow.compat.v1.logging as logging
        import os
        logging.set_verbosity(logging.ERROR)
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
        self.__dict__.update(self._defaults)  # set up default values
        self.__dict__.update(kwargs)  # and update with user overrides
        self.image = True
        self.class_names = self._get_class()
        self.anchors = self._get_anchors()
        self.sess = K.get_session()
        self.boxes, self.scores, self.classes = self.generate()
Example #10
    def run_plots(self, args):
        tfl.set_verbosity(tfl.ERROR)
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
        print(f"Test Arguments\n{self.args2str(args)}")

        params = [("ddpg", 1, 1720, 20, ""), ("trpo", 1, 292, 10, ""),
                  ("ppo", 1, 7, 1, "final_test")]

        re_d, sr_d = self.my_compute_data(args=args,
                                          env=args.env,
                                          params=params,
                                          n_episodes=args.n_episodes)
        self.my_plot(re_d=re_d, sr_d=sr_d, plots_dir=args.plots_dir)
Example #11
    def __init__(self):
        import tensorflow.compat.v1.logging as logging
        import os
        logging.set_verbosity(logging.ERROR)
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
        stages = 6
        np_branch1 = 38
        np_branch2 = 19
        self.model = get_testing_model(np_branch1, np_branch2, stages)

        keras_weights_file = os.path.join(_here, 'model/pose_iter_440000.h5')
        self.model.load_weights(keras_weights_file)

        # load config
        self.params, self.model_params = config_reader()
Example #12
def configure_logger(verbose: bool) -> None:
    """
    Configure application logger.

    Parameters:
        verbose (bool):
            `True` to use verbose logger, `False` otherwise.
    """
    from tensorflow import get_logger
    from tensorflow.compat.v1 import logging as tf_logging

    tf_logger = get_logger()
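    # Note: `handler`, `logger`, and the stdlib `logging`/`warnings` modules are
    # defined or imported at module level in the original source file.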
    tf_logger.handlers = [handler]
    if verbose:
        tf_logging.set_verbosity(tf_logging.INFO)
        logger.setLevel(logging.DEBUG)
    else:
        warnings.filterwarnings("ignore")
        tf_logging.set_verbosity(tf_logging.ERROR)
Example #13
def ilqr(**kwargs):
    """Run iLQR for a given environment and horizon.

    Args:

        ENV: Path to the environment's config JSON file.
    """
    verbose = kwargs["verbose"]

    if verbose == 1:
        level = tf_logging.INFO
    elif verbose == 2:
        level = tf_logging.DEBUG
    else:
        level = tf_logging.ERROR

    tf_logging.set_verbosity(level)

    def format_fn(param):
        fmt = {
            "env": None,
            "logdir": None,
            "num_samples": None,
            "num_workers": None,
            "verbose": None
        }
        return fmt.get(param, param)

    config_it = tuneconfig.ConfigFactory(kwargs, format_fn)

    runner = tuneconfig.Experiment(config_it, kwargs["logdir"])
    runner.start()

    exec_func = online_ilqr_run if kwargs["online"] else ilqr_run

    results = runner.run(exec_func, kwargs["num_samples"],
                         kwargs["num_workers"])

    for trial_id, runs in results.items():
        for _, trajectory in runs:
            print(repr(trajectory))
            print(str(trajectory))
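The if/elif cascade at the top of `ilqr` maps the integer `verbose` flag to a logging level; as a sketch, the same mapping can be written as a dictionary lookup with ERROR as the fallback (the helper name below is illustrative):

import tensorflow.compat.v1.logging as tf_logging

VERBOSITY_LEVELS = {1: tf_logging.INFO, 2: tf_logging.DEBUG}


def set_verbosity_from_flag(verbose):
    # any other value falls back to ERROR, matching the original cascade
    tf_logging.set_verbosity(VERBOSITY_LEVELS.get(verbose, tf_logging.ERROR))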
Example #14
def tensorflow_silence():
    ## thanks to
    # stackoverflow.com/questions/40426502/is-there-a-way-to-suppress-the-messages-tensorflow-prints
    try:
        from tensorflow.compat.v1 import logging
        logging.set_verbosity(logging.ERROR)
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
        os.environ['KMP_WARNINGS'] = '0'

        def deprecated(date, instructions, warn_once=False):
            def deprecated_wrapper(func):
                return func

            return deprecated_wrapper

        from tensorflow.python.util import deprecation
        deprecation.deprecated = deprecated

    except ImportError:
        pass
Example #15
def lqr(initial_state, action_size, horizon, debug, verbose):
    """Generate and solve a randomly-created LQR problem.

    Args:

        initial_state: list of floats.
    """

    if verbose:
        tf_logging.set_verbosity(tf_logging.INFO)

    if debug:
        tf_logging.set_verbosity(tf_logging.DEBUG)

    initial_state = list(map(float, initial_state.split()))
    x0 = np.array(initial_state, dtype=np.float32)[:, np.newaxis]
    state_size = len(initial_state)

    solver = envs.make_lqr(state_size, action_size)
    trajectory = solver.solve(x0, horizon)

    print(repr(trajectory))
    print()
    print(str(trajectory))
Example #16
import os

import click
import gym
import numpy as np
import psutil
import tensorflow as tf
import tensorflow.compat.v1.logging as tf_logging
import tuneconfig

from tfmpc import envs
import tfmpc.solvers.lqr
from tfmpc.launchers import online_ilqr_run
from tfmpc.launchers import ilqr_run

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

gym.logger.set_level(gym.logger.ERROR)
tf_logging.set_verbosity(tf_logging.ERROR)


@click.group()
def cli():
    pass


@cli.command()
@click.argument("initial-state")
@click.option("--action-size",
              "-a",
              type=click.IntRange(min=1),
              default=1,
              help="The number of action variables.")
@click.option("--horizon",
Example #17
        "--input-dir",
        help='input directory that contains subfolder of train, val and test')
    add_arg("--patterns", help='file patterns', default='*')
    add_arg("--output-dir", help="where the model and training info saved")
    add_arg('-d',
            '--distributed',
            action='store_true',
            help='data distributed training')
    add_arg("--num-iters",
            help="number of message passing steps",
            default=8,
            type=int)
    add_arg("--learning-rate", help='learing rate', default=0.0005, type=float)
    add_arg("--max-epochs", help='number of epochs', default=1, type=int)
    add_arg("--batch-size",
            type=int,
            help='training/evaluation batch size',
            default=500)
    add_arg("--shuffle-size",
            type=int,
            help="number of events for shuffling",
            default=650)
    add_arg("-v", "--verbose", help='verbosity', choices=['DEBUG', 'ERROR', 'FATAL', 'INFO', 'WARN'],\
        default="INFO")
    args, _ = parser.parse_known_args()

    # Set python level verbosity
    logging.set_verbosity(args.verbose)
    # Suppress C++ level warnings.
    # os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    train_and_evaluate(args)
Example #18
# -*- coding: utf-8 -*-

import pandas as pd
import sys
import tensorflow as tf
import tensorflow.compat.v1.logging as log
log.set_verbosity(log.INFO)
import numpy as np
import shutil
import os
import json
import glob
from datetime import date, timedelta
from time import time
import random

sys.path.append("../preprocessing")
from config import ColumnType, ColumnTransform
from data_parser import FeatureDictionary, DataParser

FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string("label", 'gender', "col name of label")
tf.app.flags.DEFINE_integer("num_threads", 6, "Number of threads")
tf.app.flags.DEFINE_integer("embedding_size", 32, "Embedding size")
tf.app.flags.DEFINE_integer("num_epochs", 10, "Number of epochs")
tf.app.flags.DEFINE_integer("batch_size", 64, "Number of batch size")
tf.app.flags.DEFINE_integer("log_steps", 1000, "save summary every steps")
tf.app.flags.DEFINE_float("learning_rate", 0.0005, "learning rate")
tf.app.flags.DEFINE_float("l2_reg", 0.0001, "L2 regularization")
tf.app.flags.DEFINE_string("optimizer", 'Adam', "optimizer type {Adam, Adagrad, GD, Momentum}")
tf.app.flags.DEFINE_string("deep_layers", '256,128,64', "deep layers")
Example #19
import random

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.metrics import (
    average_precision_score,
    precision_recall_curve,
    roc_auc_score,
    roc_curve,
)
from tensorflow.compat.v1.logging import INFO, set_verbosity


random.seed(a=None, version=2)

set_verbosity(INFO)


def get_mean_std_per_batch(image_path, df, H=320, W=320):
    sample_data = []
    for idx, img in enumerate(df.sample(100)["Image"].values):
        # path = image_dir + img
        sample_data.append(
            np.array(image.load_img(image_path, target_size=(H, W))))

    # statistics are computed over all sampled images, not just the first one
    mean = np.mean(sample_data)
    std = np.std(sample_data)
    return mean, std


def load_image(img, image_dir, df, preprocess=True, H=320, W=320):
Example #20
import sys

import tensorflow as tf
from tensorflow import keras

# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
import os
import subprocess
import tempfile
from tensorflow.compat.v1.logging import set_verbosity
set_verbosity(tf.compat.v1.logging.ERROR)

def train():
    fashion_mnist = keras.datasets.fashion_mnist
    (train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()

    # scale the values to 0.0 to 1.0
    train_images = train_images / 255.0
    test_images = test_images / 255.0

    # reshape for feeding into the model
    train_images = train_images.reshape(train_images.shape[0], 28, 28, 1)
    test_images = test_images.reshape(test_images.shape[0], 28, 28, 1)

    class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
                   'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']

    model = keras.Sequential([
Example #21
    parser.add_argument(
        '--noise',
        type=float,
        default=0.0,
        help='noise-level that adds to queries - sigma (default: 0.0)')

    # load arguments
    args = parser.parse_args()
    print(json.dumps(vars(args), indent=2))

    # ------------------------------------------------------------
    #  Tensorflow configurations
    # ------------------------------------------------------------

    # control tensorflow info. level
    set_verbosity(tf.compat.v1.logging.ERROR)

    # enable eager execution
    tf.enable_eager_execution()

    # ------------------------------------------------------------
    #  Load the baseline model
    # ------------------------------------------------------------
    # extract the basic information from the baseline model (always vanilla)
    net_tokens = args.netbase.split('/')
    if 'subtask' == args.dataset:
        # : subtask case
        net_tokens = net_tokens[3].split('_')
    else:
        # : fashion_mnist/cifar10
        net_tokens = net_tokens[2].split('_')
Example #22
    train_parser.add_argument(
        '--plot-tstep',
        type = int,
        default = 1,
        help = 'Number of timesteps in between generator plotting\nDefault: 1'
    )

    meta_parser.add_argument(
        '--save-dir',
        required = True,
        type = str,
        help = 'Directory in which to save models\nRequired'
    )

    init_args, _ = init_parser.parse_known_args()
    train_args, _ = train_parser.parse_known_args()
    meta_args, _ = meta_parser.parse_known_args()

    return init_args, train_args, meta_args

if __name__ == '__main__':
    
    set_verbosity('INFO')

    init_args, train_args, meta_args = get_args()
    
    gan = DiscoGAN(**vars(init_args))
    gan.train(**vars(train_args))
    gan.save_models(meta_args.save_dir)
Example #23
def do_tpoisoning(arguments):

    # --------------------------------------------------------------------------
    #   Passed arguments
    # --------------------------------------------------------------------------
    task_num = arguments[0]
    task_queue = arguments[1]
    args = arguments[2]

    # ------------------------------------------------------------
    #  Tensorflow configurations (load TF here, main causes an error)
    # ------------------------------------------------------------
    import tensorflow as tf
    from tensorflow.python.client import device_lib
    from tensorflow.compat.v1.logging import set_verbosity, ERROR
    from tensorflow.compat.v1.estimator.inputs import numpy_input_fn

    # these will load the tensorflow module, so load it here
    from utils import datasets, models

    # control tensorflow info. level
    # ------------------------------------------------------------
    #  Level | Level for Humans | Level Description
    # -------|------------------|---------------------------------
    #  0     | DEBUG            | [Default] Print all messages
    #  1     | INFO             | Filter out INFO messages
    #  2     | WARNING          | Filter out INFO & WARNING messages
    #  3     | ERROR            | Filter out all messages
    set_verbosity(ERROR)

    # ------------------------------------------------------------
    #  Run control... (for the error cases)
    # ------------------------------------------------------------
    skip_data = True if args.fromtidx else False
    skip_poison = True if (args.frompidx >= 0) else False
    print(
        ' : [Task: {}] skip conditions, from: [{}th target] w. [{}th poison]'.
        format(task_num, skip_data, skip_poison))

    # --------------------------------------------------------------------------
    #   Use the sampled dataset, not the entire one
    # --------------------------------------------------------------------------
    if os.path.exists(args.samples):
        # : load the indexes from the csv file (that contains the list of ints)
        sample_indexes = io.load_from_csv(args.samples)[0]
        sample_indexes = list(map(int, sample_indexes))
        print(' : [Task: {}] consider [{}] target sampled from the entirety'.
              format(task_num, len(sample_indexes)))
    else:
        sample_indexes = []
        print(' : [Task: {}] do not sample the targets, consider all.'.format(
            task_num))

    # ------------------------------------------------------------
    #  Do poisoning attacks for each case
    # ------------------------------------------------------------
    for each_data in task_queue:
        """
            Set store locations
        """
        # extract the store location (ex. vanilla_conv.../10.0_2_2000....)
        store_dir = args.poisond.split('/')[4:]
        store_dir = '/'.join(store_dir)

        # the target index
        poison_toks = each_data.split('/')
        poison_tkey = poison_toks[-1].replace('.pkl', '')
        poison_tkey = poison_tkey.split('_')[-1]

        # when we use sampling, check if the indexes are in our interest
        if (sample_indexes) \
            and (int(poison_tkey) not in sample_indexes):
            print(
                ' : [Task: {}][Target: {}] is not in our samples, skip'.format(
                    task_num, poison_tkey))
            continue

        # result dir and the file to store
        results_dir = os.path.join('results', 'tpoisoning', 'clean-labels',
                                   args.attmode, store_dir)
        if not os.path.exists(results_dir): os.makedirs(results_dir)
        result_file = os.path.join(results_dir,
                                   'attack_w_{}.csv'.format(poison_tkey))
        print(' : [Task: {}][Target: {}] Store the result to [{}]'.format(
            task_num, poison_tkey, result_file))
        """
            Skip the current data, based on the target index
        """
        if (args.fromtidx == poison_tkey): skip_data = False
        if skip_data:
            print(' : [Task: {}][Target: {}] Skip this...'.format(
                task_num, poison_tkey))
            continue
        """
            Load the attack data
        """
        # : load the dataset
        (x_train, y_train), (x_test, y_test) =  \
            datasets.define_dataset(args.dataset, args.datapth)

        # : bound check for the inputs
        assert (x_train.min() == 0.) and (x_train.max() == 1.) \
            and (x_test.min() == 0.) and (x_test.max() == 1.)
        print (' : [Task: {}][Target: {}] Load the dataset [{}] from [{}]'.format( \
            task_num, poison_tkey, args.dataset, args.datapth))

        # : load the poisons
        (x_poisons, y_poisons), (x_target, y_target) = \
            datasets.load_poisons(each_data, x_test, y_test, sort=True)

        # : existence of the poisons
        if (x_poisons.size == 0) or (y_poisons.size == 0):
            print(
                ' : [Task: {}][Target: {}] Doesn\'t have poisons, skip'.format(
                    task_num, poison_tkey))
            continue

        # : bound check for the poisons
        assert (x_poisons.min() >= 0.) and (x_poisons.max() <= 1.)
        print(' : [Task: {}][Target: {}] Load the poisons from [{}]'.format(
            task_num, poison_tkey, each_data))
        """
            Blend poisons and re-train each model
            1) oneshot: consider only one poison at a time
            2) multipoison: consider multiple poisons at a time (0th ~ nth)
        """
        # : condition to stop the attack (once the attacker succeeds on a target)
        stop_attack = False

        # : decide how many poisons to use
        for pidx in range(len(x_poisons)):

            # :: skip, if the attack has been successful
            if stop_attack: continue

            # :: set the poison index
            poison_index = pidx + 1

            # :: consider max. the number of poisons specified
            if (args.poisonn > 0) \
                and (poison_index > args.poisonn):
                print (' : [Task: {}][Target: {}][{:>3}] Stop, # of poisons to consider is [{}]'.format( \
                    task_num, poison_tkey, poison_index, args.poisonn))
                break

            # :: skip the current poison, based on the poison index
            if (args.frompidx == poison_index): skip_poison = False
            if skip_poison:
                print(' : [Task: {}][Target: {}][{:>3}] Skip this poison...'.
                      format(task_num, poison_tkey, poison_index))
                continue

            # :: cleanup directories in the previous runs
            _cleanup_directories(results_dir, poison_tkey)

            # :: copy the checkpoint to the result dir.
            result_pmodel = os.path.join(
                results_dir, '{}_{}'.format(poison_tkey, poison_index))
            shutil.copytree(args.netpath, result_pmodel)
            time.sleep(_wait_ops)  # delay for copying files
            print (' : [Task: {}][Target: {}][{:>3}] Copy the clean model to [{}]'.format( \
                task_num, poison_tkey, poison_index, result_pmodel))

            # :: tensorflow runtime configuration
            cur_rconf = tf.estimator.RunConfig(
                tf_random_seed=_rand_fix,
                keep_checkpoint_max=1,  # 0 means all, do not use
            )

            # :: extract the basic information from the model location
            mtokens = args.netpath.split('/')
            mtokens = mtokens[2].split('_')
            batch_size = int(mtokens[2])
            epochs = int(mtokens[3])
            if ('purchases' == args.dataset):
                epochs = epochs // 2
            else:
                epochs = 20 if (epochs > 20) else (epochs // 2)
            learn_rate = float(mtokens[4])

            # :: load the pre-trained model
            if not args.privacy:
                cur_model = models._load_vanilla_model( \
                    cur_rconf, \
                    args.dataset, args.datapth, args.network, result_pmodel, \
                    batch_size, learn_rate)
                print (' : [Task: {}][Target: {}][{:>3}] Load the '.format(task_num, poison_tkey, poison_index) + \
                        'pre-trained vanilla model from [{}]'.format(result_pmodel))
            else:
                # :: extract the extra information about privacy
                epsilon = float(mtokens[5])
                delta = float(mtokens[6])
                norm_clip = float(mtokens[7])
                noises = float(mtokens[8])

                # :: load the privacy model
                cur_model = models._load_dp_model( \
                    cur_rconf, \
                    args.dataset, args.datapth, x_train.shape[0], args.network, result_pmodel, \
                    batch_size, learn_rate, epsilon, delta, norm_clip, noises)
                print (' : [Task: {}][Target: {}][{:>3}] Load the '.format(task_num, poison_tkey, poison_index) + \
                        'pre-trained privacy model from [{}]'.format(result_pmodel))

            # :: blend poisons into the training data
            if 'oneshot' == args.attmode:
                cur_x_train = np.concatenate(
                    (x_train, x_poisons[poison_index - 1:poison_index]),
                    axis=0)
                cur_y_train = np.concatenate(
                    (y_train, y_poisons[poison_index - 1:poison_index]),
                    axis=0)
            elif 'multipoison' == args.attmode:
                cur_x_train = np.concatenate(
                    (x_train, x_poisons[:poison_index]), axis=0)
                cur_y_train = np.concatenate(
                    (y_train, y_poisons[:poison_index]), axis=0)
            else:
                assert False, ('Error: unknown attack mode - {}'.format(
                    args.attmode))

            # :: create the estimator functions
            cur_train_fn = numpy_input_fn(x={'x': cur_x_train},
                                          y=cur_y_train,
                                          batch_size=batch_size,
                                          num_epochs=epochs,
                                          shuffle=True)
            cur_test_fn = numpy_input_fn(x={'x': x_test},
                                         y=y_test,
                                         num_epochs=1,
                                         shuffle=False)
            cur_target_fn = numpy_input_fn(x={'x': x_target},
                                           y=y_target,
                                           num_epochs=1,
                                           shuffle=False)

            # :: condition to remove the retrained model
            remove_pmodel = True

            # :: to compare the probability changes from the oracle
            oracle_predict = cur_model.predict(input_fn=cur_target_fn)
            oracle_predict = list(oracle_predict)[0]
            oracle_bas_prob = oracle_predict['probabilities'][args.b_class]
            oracle_tar_prob = oracle_predict['probabilities'][args.t_class]

            # :: re-train the network with the poisoning data
            cur_steps_per_epoch = cur_x_train.shape[0] // batch_size
            for cur_epoch in range(1, epochs + 1):

                # ::: train for an epoch
                cur_model.train( \
                    input_fn=cur_train_fn, steps=cur_steps_per_epoch)

                # ::: evaluate for one instance
                cur_predicts = cur_model.predict(input_fn=cur_target_fn)
                cur_predicts = list(cur_predicts)[0]
                cur_probs = cur_predicts['probabilities']
                cur_bas_prob = cur_predicts['probabilities'][args.b_class]
                cur_tar_prob = cur_predicts['probabilities'][args.t_class]

                # ::: check if we have the successful attack
                if (cur_predicts['classes'] == args.t_class):

                    # > validate the re-trained model
                    cur_predicts = cur_model.evaluate(input_fn=cur_test_fn)
                    cur_accuracy = cur_predicts['accuracy']

                    # > only compute the accuracy (when no privacy)
                    if not args.privacy:
                        # > store the data to a file
                        cur_result = [[poison_tkey, poison_index, \
                                        oracle_bas_prob, oracle_tar_prob, \
                                        cur_bas_prob, cur_tar_prob, \
                                        cur_epoch, cur_accuracy]]
                        io.store_to_csv(result_file, cur_result, mode='a')

                        # > notify
                        print (' : [Task: {}][Target: {}][{:>3}] epoch {} - attack success!'.format( \
                            task_num, poison_tkey, poison_index, cur_epoch))
                        print ('  - Prob [3:{:.4f} / 4:{:.4f}], acc [{:.4f}]'.format( \
                            cur_bas_prob, cur_tar_prob, cur_accuracy), flush=True)

                    # > compute the epsilon (when privacy)
                    else:
                        cur_epsilon = models.compute_epsilon( \
                            cur_epoch * cur_steps_per_epoch, \
                            cur_x_train.shape[0], batch_size, delta, noises)

                        # > store the data to a file
                        cur_result = [[poison_tkey, poison_index, \
                                        oracle_bas_prob, oracle_tar_prob, \
                                        cur_bas_prob, cur_tar_prob, \
                                        cur_epoch, cur_accuracy, cur_epsilon]]
                        io.store_to_csv(result_file, cur_result, mode='a')

                        # > notify
                        print (' : [Task: {}][Target: {}][{:>3}] epoch {} - attack success!'.format( \
                            task_num, poison_tkey, poison_index, cur_epoch))
                        print ('  - Prob [3:{:.4f} / 4:{:.4f}], acc [{:.4f}], eps [{:.4f} <- {:.4f} + {:.4f}]'.format( \
                            cur_bas_prob, cur_tar_prob, cur_accuracy, cur_epsilon+epsilon, epsilon, cur_epsilon), flush=True)

                    # > stop the attack process (retain model and stop)
                    remove_pmodel = False
                    stop_attack = True
                    break

                # ::: if not successful
                else:
                    if (len(cur_probs) > 10): cur_probs = cur_probs[:10]
                    print (' : [Task: {}][Target: {}][{:>3}] epoch {} - attack fail, keep going - Prob [3:{:.4f} / 4:{:.4f}] - {}'.format( \
                        task_num, poison_tkey, poison_index, cur_epoch, cur_bas_prob, cur_tar_prob, cur_probs), flush=True)
                # ::: end if (cur_accuracy...

            # :: end for epoch...

            # :: remove model if it's true
            if remove_pmodel:
                shutil.rmtree(result_pmodel, ignore_errors=True)
                time.sleep(_wait_ops)
                print(' : [Task: {}][Target: {}] Attack failed, remove [{}]'.
                      format(task_num, poison_tkey, result_pmodel))

            # :: reset the tensorflow graph for another run
            tf.reset_default_graph()

        # : end for pidx...
    # end for aidx...

    print(' : [Task: {}] finished'.format(task_num))
Example #24
import os, sys; os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
from time import time
from tensorflow.compat.v1 import logging; logging.set_verbosity(logging.ERROR)

from keras.models import Model
from keras.layers import Input, Reshape, Conv2D, BatchNormalization, Activation, Flatten, Dense, Dropout, Add
from keras.initializers import TruncatedNormal, RandomUniform
from keras.optimizers import Adam
from keras.metrics import mse, categorical_accuracy
from keras.callbacks import ModelCheckpoint, TensorBoard
from keras.utils.vis_utils import plot_model
import numpy as np

from PIL import Image

class RNN():
    def __init__(self, env, net='rnn'):
        self.game = env.name
        self.net = net
        self.action_size = env.action_size # an int
        self.board_size = env.board_size # a tuple

        inputs = Input(shape=(self.board_size[0], self.board_size[1]*3) )
        inputs_reshape = Reshape((self.board_size[0], self.board_size[1]*3, 1) )(inputs)
        outer = Conv2D(32, kernel_size=2, strides=1, padding='same')(inputs_reshape)
        outer = BatchNormalization(axis=-1)(outer)
        outer = Activation('relu')(outer)

        inner = outer
        for _ in range(3): # number of blocks
            #inner = outer
Example #25
# Import modules
from tensorflow.compat.v1 import logging, placeholder, global_variables_initializer, Session
from tensorflow import Variable, random, zeros, name_scope, nn, matmul, sigmoid, reduce_mean, Graph
from tensorflow.compat.v1.train import GradientDescentOptimizer, Saver
from numpy import array, square

logging.set_verbosity(logging.ERROR)  # Hide unnecessary warnings

with Graph().as_default():  # Use this graph as the default

    X = placeholder("float32", shape=[4, 2],
                    name='X')  # Create a placeholder to hold the input
    Y = placeholder("float32", shape=[4, 1],
                    name='Y')  # Create a placeholder to hold the output

    W = Variable(
        random.uniform([2, 2], -1, 1), name="W"
    )  # Create a variable (slope) with random values between -1 and 1 for the input
    w = Variable(
        random.uniform([2, 1], -1, 1), name="w"
    )  # Create a variable (slope) with random values between -1 and 1 for the output

    c = Variable(
        zeros([4, 2]), name="c"
    )  # Create a variable (offset) initialized with zeros for the input
    b = Variable(
        zeros([4, 1]), name="b"
    )  # Create a variable (offset) initialized with zeros for the output

    with name_scope("hidden_layer") as scope:
        h = nn.relu(
Example #26
import tensorflow as tf
from tensorflow.compat.v1 import logging
logging.set_verbosity("INFO")
logging.info("TF Version:{}".format(tf.__version__))
try:
    import horovod.tensorflow as hvd
    no_horovod = False
except ModuleNotFoundError:
    logging.warning("No horvod module, cannot perform distributed training")
    no_horovod = True

import os
import six
from types import SimpleNamespace
import pprint
import time
import functools

import numpy as np

from tensorflow.python.profiler import profiler_v2 as profiler

from graph_nets import utils_tf
from graph_nets import utils_np
import sonnet as snt

from root_gnn.utils import load_yaml
from root_gnn.src.datasets import graph
from root_gnn import model as all_models
from root_gnn import losses
Example #27
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.python.keras import models
from . import model_resnet

from tensorflow.compat.v1 import logging as tf_logging
tf_logging.set_verbosity(tf_logging.INFO)


def keras_model(model_dir, learning_rate):
    """Creates a Keras Sequential model with layers.

    Args:
      model_dir: (str) file path where training files will be written.
      learning_rate: (float) Learning rate.

    Returns:
      A keras.Model
    """
    # model = models.Sequential()
    # model.add(Flatten(input_shape=(28, 28)))
Example #28
import tensorflow as tf
import tensorflow.io.gfile as gfile
import tensorflow.compat.v1.logging as logging
import tffilesync
import time
import os
import unittest
import tempfile

logging.set_verbosity(logging.DEBUG)


def _kick_sync(syncer: tffilesync.Syncer):
    epoch = syncer.epoch()
    syncer.kick()
    while epoch == syncer.epoch():
        time.sleep(0.1)


def _read_file(path: str) -> str:
    with open(path, 'r') as fd:
        return fd.read()


class TestSync(unittest.TestCase):
    def test_basic(self):
        with tempfile.TemporaryDirectory() as remotedir, \
             tempfile.TemporaryDirectory() as localdir:
            with open(remotedir + '/f0.txt', 'w') as fd:
                fd.write('hello0')
            syncer = tffilesync.Syncer(remotedir, localdir)
Example #29
from tensorflow.compat.v1 import logging, placeholder, get_variable, Session
from tensorflow import Graph, name_scope, nn, matmul, sigmoid, reduce_mean
from tensorflow.compat.v1.train import Saver
from numpy import array, square

logging.set_verbosity(logging.ERROR)

with Graph().as_default() as g:
    X = placeholder("float32", shape=[4, 2], name='X')
    Y = placeholder("float32", shape=[4, 1], name='Y')

    W = get_variable(shape=[2, 2], name='W')
    w = get_variable(shape=[2, 1], name='w')

    c = get_variable(shape=[4, 2], name='c')
    b = get_variable(shape=[4, 1], name='b')

    with name_scope("hidden_layer") as scope:
        h = nn.relu(matmul(X, W) + c)

    with name_scope("output") as scope:
        y_estimated = sigmoid(matmul(h, w) + b)

    with name_scope("loss") as scope:
        loss = reduce_mean(square(y_estimated - Y))

    with Session() as sess:
        saver = Saver()
        saver.restore(sess, "/tmp/model.ckpt")
        print("Model restored.")
        print(
Example #30
    keras_model.save(
        os.path.join(hparams.job_dir, "savedmodel"),
        save_format="tf",  # {"tf", "h5"}
    )
    # config = model.get_config()
    # reinitialized_model = keras.Model.from_config(config)


if __name__ == '__main__':

    # args = get_args()
    # tf.logging.set_verbosity(args.verbosity)
    # hparams = hparam.HParams(**args.__dict__)
    # train_and_evaluate(hparams)

    args = get_args()
    tf_logging.set_verbosity(args.verbosity)
    # hparams = hparam.HParams(**args.__dict__)
    train_and_evaluate(args)

# # %%
# from tensorboard.plugins.hparams import api as hp
# adict = {'num_epochs': 5, 'batch_size': 128,
#          'learning_rate': 0.01, 'verbosity': 'INFO'}
# hp.HParam(**adict)

# hp.hparams(adict)

# ?hp.hparams