Example 1
def test_compress(args):
    """Compresses an image."""
    fn = tf.placeholder(tf.string, [])

    # Load input image and add batch dimension.
    x = read_png(fn)
    x = tf.expand_dims(x, 0)
    x.set_shape([1, None, None, 3])
    x_shape = tf.shape(x)

    # Instantiate model.
    analysis_transform = AnalysisTransform(args.num_filters)
    synthesis_transform = SynthesisTransform(args.num_filters)
    hyper_analysis_transform = HyperAnalysisTransform(args.num_filters)
    hyper_synthesis_transform = HyperSynthesisTransform(args.num_filters)
    entropy_bottleneck = tfc.EntropyBottleneck()

    # Transform and compress the image.
    y = analysis_transform(x)
    y_shape = tf.shape(y)
    z = hyper_analysis_transform(abs(y))
    z_hat, z_likelihoods = entropy_bottleneck(z, training=False)
    sigma = hyper_synthesis_transform(z_hat)
    sigma = sigma[:, :y_shape[1], :y_shape[2], :]
    scale_table = np.exp(
        np.linspace(np.log(SCALES_MIN), np.log(SCALES_MAX), SCALES_LEVELS))
    conditional_bottleneck = DynamicGaussianConditional(
        sigma, scale_table, name="gaussian_conditional")

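    # Entropy-code the hyperprior (side information) and the latents.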
    side_string = entropy_bottleneck.compress(z)
    string = conditional_bottleneck.compress(y)

    # Transform the quantized image back (if requested).
    y_hat, y_likelihoods = conditional_bottleneck(y, training=False)
    x_hat = synthesis_transform(y_hat)
    x_hat = x_hat[:, :x_shape[1], :x_shape[2], :]

    num_pixels = tf.cast(tf.reduce_prod(tf.shape(x)[:-1]), dtype=tf.float32)

    # Total number of bits divided by number of pixels.
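    # (tf.log gives nats; the sums are negative, so dividing by -log(2)
    # yields a positive bit count.)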
    eval_bpp = (tf.reduce_sum(tf.log(y_likelihoods)) + tf.reduce_sum(
        tf.log(z_likelihoods))) / (-np.log(2) * num_pixels)

    # Bring both images back to 0..255 range.
    x *= 255
    x_hat = tf.clip_by_value(x_hat, 0, 1)
    x_hat = tf.round(x_hat * 255)

    mse = tf.reduce_mean(tf.squared_difference(x, x_hat))
    psnr = tf.squeeze(tf.image.psnr(x_hat, x, 255))
    msssim = tf.squeeze(tf.image.ssim_multiscale(x_hat, x, 255))

    with tf.Session() as sess:
        # Load the latest model checkpoint.
        latest = tf.train.latest_checkpoint(checkpoint_dir=args.checkpoint_dir)
        tf.train.Saver().restore(sess, save_path=latest)

        const = tf.constant([1] * 256 + [0] * 224, dtype=tf.float32)
        f = open("e5.csv", "w")
        print("active, fn, bpp, mse, np", file=f)
        for active in range(256, 31, -16):
            mask = const[256 - active:512 - active]
            rate = tf.reduce_sum(mask) / 256
            y_itc = y * mask / rate

            string = conditional_bottleneck.compress(y_itc)
            y_itc_hat = conditional_bottleneck.decompress(string)

            # Transform the quantized image back (if requested).
            x_hat = synthesis_transform(y_itc_hat)
            x_hat = x_hat[:, :x_shape[1], :x_shape[2], :]

            eval_bpp = (tf.reduce_sum(tf.log(y_likelihoods[:, :, :, :active]))
                        + tf.reduce_sum(tf.log(z_likelihoods))) / (-np.log(2) *
                                                                   num_pixels)

            x_hat = tf.clip_by_value(x_hat, 0, 1)
            x_hat = tf.round(x_hat * 255)

            mse = tf.reduce_mean(tf.squared_difference(x, x_hat))
            psnr = tf.squeeze(tf.image.psnr(x_hat, x, 255))
            msssim = tf.squeeze(tf.image.ssim_multiscale(x_hat, x, 255))

            for filename in glob.glob("kodak/*.png"):

                v_eval_bpp, v_mse, v_num_pixels = sess.run(
                    [eval_bpp, mse, num_pixels], feed_dict={fn: filename})

                print("%.2f, %s, %.4f, %.4f, %d" %
                      (active, filename, v_eval_bpp, v_mse, v_num_pixels),
                      file=f)

        f.close()
Example 2
    def run(self):
        win_count = 0
        while True:
            try:

                tf.reset_default_graph()
                state_maker = StateMaker()

                if not os.path.exists(MODEL_PATH):
                    os.makedirs(MODEL_PATH)

                with tf.Session() as sess:
                    # Weights come from the checkpoint restored below, so no
                    # variable initializer is needed here.

                    saver = tf.train.import_meta_graph(MODEL_PATH +
                                                       '/model-ddqn.ckpt.meta')
                    saver.restore(sess,
                                  tf.train.latest_checkpoint(MODEL_PATH + '/'))
                    graph = tf.get_default_graph()
                    online_in = graph.get_tensor_by_name('X_1:0')

                    # Run loop
                    info = self.ar.configure(self.id)
                    self.solved = [
                        0 for x in range(self.ar.get_number_of_levels())
                    ]

                    max_scores = np.zeros([len(self.solved)])

                    self.current_level = self.get_next_level()
                    self.ar.load_level(self.current_level)

                    if self.current_level >= 457:
                        print("Shutong: win_count: ", win_count)

                    s = 'None'
                    d = False
                    first_time_in_level_in_episode = True

                    for env_step in range(1, TOTAL_STEPS):
                        game_state = self.ar.get_game_state()
                        r = self.ar.get_current_score()

                        print('current score ', r)
                        print('win count: ', win_count)

                        # First check if we are in the won or lost state
                        # to adjust the reward and done flag if needed
                        if game_state == GameState.WON:
                            # save current state before reloading the level
                            s = self.ar.do_screenshot()
                            s = state_maker.make(sess, s)
                            n_levels = self.update_no_of_levels()
                            print("number of levels ", n_levels)
                            self.check_my_score()
                            self.current_level = self.get_next_level()
                            self.ar.load_level(self.current_level)

                            win_count += 1

                            # Update reward and done
                            d = True
                            first_time_in_level_in_episode = True

                        elif game_state == GameState.LOST:
                            # save current state before reloading the level
                            s = self.ar.do_screenshot()
                            s = state_maker.make(sess, s)

                            # check for change of number of levels in the game
                            n_levels = self.update_no_of_levels()
                            print("number of levels ", n_levels)
                            self.check_my_score()
                            # If lost, then restart the level
                            self.failed_counter += 1
                            if self.failed_counter > 1:  # for testing, go directly to the next level

                                self.failed_counter = 0
                                self.current_level = self.get_next_level()
                                self.ar.load_level(self.current_level)
                            else:
                                print("restart")
                                self.ar.load_level(self.current_level)

                            # Update reward and done
                            d = True
                            first_time_in_level_in_episode = True

                        if (game_state == GameState.PLAYING):
                            # Start of the episode
                            if (first_time_in_level_in_episode):
                                # If first time in level reset states
                                s = 'None'
                                # Zoom out repeatedly to see the whole level.
                                for _ in range(7):
                                    self.ar.fully_zoom_out()
                                first_time_in_level_in_episode = False

                            self.get_slingshot_center()

                            if self.sling_center is None:
                                print('sling ', self.sling_center)
                                continue
                            s = self.ar.do_screenshot()
                            s = state_maker.make(sess, s)

                            a = sess.run(
                                graph.get_tensor_by_name('ArgMax_1:0'),
                                feed_dict={online_in: [s]})

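                            # Interpret the argmax output as a release angle in
                            # degrees and convert it to drag offsets on a
                            # 40-pixel radius (negative x pulls the sling back).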
                            tap_time = random.randint(65, 100)
                            ax_pixels = -int(40 * math.cos(math.radians(a)))
                            ay_pixels = int(40 * math.sin(math.radians(a)))

                            print("Shoot: " + str(ax_pixels) + ", " +
                                  str(ay_pixels) + ", " + str(tap_time))
                            # Execute a in the environment
                            self.ar.shoot(int(self.sling_center.X),
                                          int(self.sling_center.Y), ax_pixels,
                                          ay_pixels, 0, tap_time, False)

                        elif game_state == GameState.LEVEL_SELECTION:
                            print(
                                "unexpected level selection page, go to the last current level : ",
                                self.current_level)
                            self.ar.load_level(self.current_level)

                        elif game_state == GameState.MAIN_MENU:
                            print(
                                "unexpected main menu page, reload the level : ",
                                self.current_level)
                            self.ar.load_level(self.current_level)

                        elif game_state == GameState.EPISODE_MENU:
                            print(
                                "unexpected episode menu page, reload the level: ",
                                self.current_level)
                            self.ar.load_level(self.current_level)

            except Exception as e:
                print("Error: ", e)
            finally:
                time.sleep(10)
Example 3
    def __init__(
        self,
        images: "tf.Tensor",
        model: Optional["FasterRCNNMetaArch"] = None,
        filename: Optional[str] = None,
        url: Optional[str] = None,
        sess: Optional["Session"] = None,
        is_training: bool = False,
        clip_values: Optional["CLIP_VALUES_TYPE"] = None,
        channels_first: bool = False,
        preprocessing_defences: Union["Preprocessor", List["Preprocessor"],
                                      None] = None,
        postprocessing_defences: Union["Postprocessor", List["Postprocessor"],
                                       None] = None,
        preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0),
        attack_losses: Tuple[str, ...] = (
            "Loss/RPNLoss/localization_loss",
            "Loss/RPNLoss/objectness_loss",
            "Loss/BoxClassifierLoss/localization_loss",
            "Loss/BoxClassifierLoss/classification_loss",
        ),
    ):
        """
        Initialization of an instance of TensorFlowFasterRCNN.

        :param images: Input samples of shape (nb_samples, height, width, nb_channels).
        :param model: A TensorFlow Faster-RCNN model. The output that can be computed from the model includes a tuple
                      of (predictions, losses, detections):

                        - predictions: a dictionary holding "raw" prediction tensors.
                        - losses: a dictionary mapping loss keys (`Loss/RPNLoss/localization_loss`,
                                  `Loss/RPNLoss/objectness_loss`, `Loss/BoxClassifierLoss/localization_loss`,
                                  `Loss/BoxClassifierLoss/classification_loss`) to scalar tensors representing
                                  corresponding loss values.
                        - detections: a dictionary containing final detection results.
        :param filename: Filename of the detection model without filename extension.
        :param url: URL to download archive of detection model including filename extension.
        :param sess: Computation session.
        :param is_training: A boolean indicating whether the training version of the computation graph should be
                            constructed.
        :param clip_values: Tuple of the form `(min, max)` of floats or `np.ndarray` representing the minimum and
                            maximum values allowed for input image features. If floats are provided, these will be
                            used as the range of all features. If arrays are provided, each value will be considered
                            the bound for a feature, thus the shape of clip values needs to match the total number
                            of features.
        :param channels_first: Set channels first or last.
        :param preprocessing_defences: Preprocessing defence(s) to be applied by the classifier.
        :param postprocessing_defences: Postprocessing defence(s) to be applied by the classifier.
        :param preprocessing: Tuple of the form `(subtractor, divider)` of floats or `np.ndarray` of values to be
                              used for data preprocessing. The first value will be subtracted from the input. The
                              input will then be divided by the second one.
        :param attack_losses: Tuple of any combination of strings of the following loss components:
                              `first_stage_localization_loss`, `first_stage_objectness_loss`,
                              `second_stage_localization_loss`, `second_stage_classification_loss`.
        """
        import tensorflow.compat.v1 as tf  # lgtm [py/repeated-import]

        # Super initialization
        super().__init__(
            model=model,
            clip_values=clip_values,
            channels_first=channels_first,
            preprocessing_defences=preprocessing_defences,
            postprocessing_defences=postprocessing_defences,
            preprocessing=preprocessing,
        )

        # Check clip values
        if self.clip_values is not None:
            if not np.all(self.clip_values[0] == 0):
                raise ValueError(
                    "This classifier requires normalized input images with clip_vales=(0, 1)."
                )
            if not np.all(self.clip_values[1] == 1):  # pragma: no cover
                raise ValueError(
                    "This classifier requires normalized input images with clip_vales=(0, 1)."
                )

        # Check preprocessing and postprocessing defences
        if self.preprocessing_defences is not None:
            raise ValueError(
                "This estimator does not support `preprocessing_defences`.")
        if self.postprocessing_defences is not None:
            raise ValueError(
                "This estimator does not support `postprocessing_defences`.")

        # Create placeholders for groundtruth boxes
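        # One placeholder per image in the batch; the number of boxes varies
        # per image, hence the leading dimension is None.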
        self._groundtruth_boxes_list: List["tf.Tensor"]
        self._groundtruth_boxes_list = [
            tf.placeholder(dtype=tf.float32,
                           shape=(None, 4),
                           name=f"groundtruth_boxes_{i}")
            for i in range(images.shape[0])
        ]

        # Create placeholders for groundtruth classes
        self._groundtruth_classes_list: List["tf.Tensor"]
        self._groundtruth_classes_list = [
            tf.placeholder(dtype=tf.int32,
                           shape=(None, ),
                           name=f"groundtruth_classes_{i}")
            for i in range(images.shape[0])
        ]

        # Create placeholders for groundtruth weights
        self._groundtruth_weights_list: List["tf.Tensor"]
        self._groundtruth_weights_list = [
            tf.placeholder(dtype=tf.float32,
                           shape=(None, ),
                           name=f"groundtruth_weights_{i}")
            for i in range(images.shape[0])
        ]

        # Load model
        if model is None:
            # If model is None, then we need to have parameters filename and url to download, extract and load the
            # object detection model
            if filename is None or url is None:
                filename, url = (
                    "faster_rcnn_inception_v2_coco_2017_11_08",
                    "http://download.tensorflow.org/models/object_detection/"
                    "faster_rcnn_inception_v2_coco_2017_11_08.tar.gz",
                )

            self._model, self._predictions, self._losses, self._detections = self._load_model(
                images=images,
                filename=filename,
                url=url,
                obj_detection_model=None,
                is_training=is_training,
                groundtruth_boxes_list=self._groundtruth_boxes_list,
                groundtruth_classes_list=self._groundtruth_classes_list,
                groundtruth_weights_list=self._groundtruth_weights_list,
            )

        else:
            self._model, self._predictions, self._losses, self._detections = self._load_model(
                images=images,
                filename=None,
                url=None,
                obj_detection_model=model,
                is_training=is_training,
                groundtruth_boxes_list=self._groundtruth_boxes_list,
                groundtruth_classes_list=self._groundtruth_classes_list,
                groundtruth_weights_list=self._groundtruth_weights_list,
            )

        # Save new attributes
        self._input_shape = images.shape.as_list()[1:]
        self.is_training: bool = is_training
        self.images: Optional["tf.Tensor"] = images
        self.attack_losses: Tuple[str, ...] = attack_losses

        # Assign session
        if sess is None:
            logger.warning("A session cannot be None, create a new session.")
            self._sess = tf.Session()
        else:  # pragma: no cover
            self._sess = sess

        # Initialize variables
        self._sess.run(tf.global_variables_initializer())
        self._sess.run(tf.local_variables_initializer())
Example 4
    def __init__(self, board_width, board_height, model_file=None):
        self.board_width = board_width
        self.board_height = board_height

        # Define the tensorflow neural network
        # 1. Input:
        self.input_states = tf.placeholder(
                tf.float32, shape=[None, 4, board_height, board_width])
        self.input_state = tf.transpose(self.input_states, [0, 2, 3, 1])
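        # NCHW -> NHWC so the conv2d layers below can use channels_last.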
        # 2. Common Networks Layers
        self.conv1 = tf.layers.conv2d(inputs=self.input_state,
                                      filters=32, kernel_size=[3, 3],
                                      padding="same", data_format="channels_last",
                                      activation=tf.nn.relu)
        self.conv2 = tf.layers.conv2d(inputs=self.conv1, filters=64,
                                      kernel_size=[3, 3], padding="same",
                                      data_format="channels_last",
                                      activation=tf.nn.relu)
        self.conv3 = tf.layers.conv2d(inputs=self.conv2, filters=128,
                                      kernel_size=[3, 3], padding="same",
                                      data_format="channels_last",
                                      activation=tf.nn.relu)
        # 3-1 Action Networks
        self.action_conv = tf.layers.conv2d(inputs=self.conv3, filters=4,
                                            kernel_size=[1, 1], padding="same",
                                            data_format="channels_last",
                                            activation=tf.nn.relu)
        # Flatten the tensor
        self.action_conv_flat = tf.reshape(
                self.action_conv, [-1, 4 * board_height * board_width])
        # 3-2 Full connected layer, the output is the log probability of moves
        # on each slot on the board
        self.action_fc = tf.layers.dense(inputs=self.action_conv_flat,
                                         units=board_height * board_width,
                                         activation=tf.nn.log_softmax)
        # 4 Evaluation Networks
        self.evaluation_conv = tf.layers.conv2d(inputs=self.conv3, filters=2,
                                                kernel_size=[1, 1],
                                                padding="same",
                                                data_format="channels_last",
                                                activation=tf.nn.relu)
        self.evaluation_conv_flat = tf.reshape(
                self.evaluation_conv, [-1, 2 * board_height * board_width])
        self.evaluation_fc1 = tf.layers.dense(inputs=self.evaluation_conv_flat,
                                              units=64, activation=tf.nn.relu)
        # output the score of evaluation on current state
        self.evaluation_fc2 = tf.layers.dense(inputs=self.evaluation_fc1,
                                              units=1, activation=tf.nn.tanh)

        # Define the Loss function
        # 1. Label: the array indicating whether the game was won for each state
        self.labels = tf.placeholder(tf.float32, shape=[None, 1])
        # 2. Predictions: the evaluation score of each state,
        #    i.e. self.evaluation_fc2
        # 3-1. Value Loss function
        self.value_loss = tf.losses.mean_squared_error(self.labels,
                                                       self.evaluation_fc2)
        # 3-2. Policy Loss function
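        # Cross-entropy between the MCTS visit probabilities and the network's
        # log-probabilities (action_fc already applies log_softmax).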
        self.mcts_probs = tf.placeholder(
                tf.float32, shape=[None, board_height * board_width])
        self.policy_loss = tf.negative(tf.reduce_mean(
                tf.reduce_sum(tf.multiply(self.mcts_probs, self.action_fc), 1)))
        # 3-3. L2 penalty (regularization)
        l2_penalty_beta = 1e-4
        train_vars = tf.trainable_variables()
        l2_penalty = l2_penalty_beta * tf.add_n(
            [tf.nn.l2_loss(v) for v in train_vars if 'bias' not in v.name.lower()])
        # 3-4 Add up to be the Loss function
        self.loss = self.value_loss + self.policy_loss + l2_penalty

        # Define the optimizer we use for training
        self.learning_rate = tf.placeholder(tf.float32)
        self.optimizer = tf.train.AdamOptimizer(
                learning_rate=self.learning_rate).minimize(self.loss)

        # Make a session
        self.session = tf.Session()

        # calc policy entropy, for monitoring only
        self.entropy = tf.negative(tf.reduce_mean(
                tf.reduce_sum(tf.exp(self.action_fc) * self.action_fc, 1)))

        # Initialize variables
        init = tf.global_variables_initializer()
        self.session.run(init)

        # For saving and restoring
        self.saver = tf.train.Saver()
        if model_file is not None:
            self.restore_model(model_file)
Example 5
def main(unused_args):

    logging.set_verbosity(logging.INFO)
    tf.random.set_random_seed(SEED)

    logging.info('hops %d lr %f train %s test %s dev %s', FLAGS.num_hops,
                 FLAGS.learning_rate, FLAGS.train_file, FLAGS.test_file,
                 FLAGS.dev_file)

    # set up the builder, context, and datasets
    builder = MetaQABuilder()
    context = builder.build_context()

    train_dset_fn = make_dset_fn(context,
                                 FLAGS.train_file,
                                 shuffle=True,
                                 epochs=FLAGS.epochs,
                                 n_take=FLAGS.num_train)
    test_dset_fn = make_dset_fn(context,
                                FLAGS.test_file,
                                shuffle=True,
                                epochs=1)

    if FLAGS.action == 'train_ph':
        feature_ph_dict, labels_ph = make_feature_label_ph_pair(context)
        model = builder.build_model(feature_ph_dict,
                                    labels_ph,
                                    context=context)
        with tf.Session() as session:
            trainer = util.Trainer(session, model, feature_ph_dict, labels_ph)
            trainer.train(train_dset_fn())
            tf.train.Saver().save(session,
                                  FLAGS.checkpoint_dir + '/final.chpt')

    elif FLAGS.action == 'test_ph':
        feature_ph_dict, labels_ph = make_feature_label_ph_pair(context)
        model = builder.build_model(feature_ph_dict,
                                    labels_ph,
                                    context=context)
        with tf.Session() as session:
            tf.train.Saver().restore(session,
                                     FLAGS.checkpoint_dir + '/final.chpt')
            trainer = util.Trainer(session, model, feature_ph_dict, labels_ph)
            evaluation = trainer.evaluate(test_dset_fn())
            print('evaluation', evaluation)

    elif FLAGS.action == 'expt_ph':
        feature_ph_dict, labels_ph = make_feature_label_ph_pair(context)
        model = builder.build_model(feature_ph_dict,
                                    labels_ph,
                                    context=context)

        with tf.Session() as session:
            trainer = util.Trainer(session, model, feature_ph_dict, labels_ph)

            callback_dset_fn = make_dset_fn(context,
                                            FLAGS.dev_file,
                                            shuffle=True,
                                            epochs=1,
                                            n_take=FLAGS.online_eval_size)

            evaluation = trainer.evaluate(callback_dset_fn())
            tf.logging.info('before training: %s', evaluation)

            # set up callback to evaluate on dev every so often
            model.num_steps = 0
            old_callback = model.training_callback

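            # Wrap the existing callback so a dev-set evaluation runs every
            # FLAGS.steps_between_evals training steps.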
            def eval_periodically_callback(fd, latest_loss, elapsed_time):
                # default callback increments model.num_examples
                status = old_callback(fd, latest_loss, elapsed_time)
                model.num_steps += 1
                if (model.num_steps % FLAGS.steps_between_evals) == 0:
                    tf.logging.info('running eval on heldout dev set...')
                    evaluation = trainer.evaluate(callback_dset_fn())
                    tf.logging.info('after %d examples: %s',
                                    model.num_examples, evaluation)
                return status

            model.training_callback = eval_periodically_callback

            trainer.train(train_dset_fn())
            evaluation = trainer.evaluate(test_dset_fn())
            tf.logging.info('final evaluation %s', evaluation)
            try:
                tf.train.Saver().save(session,
                                      FLAGS.checkpoint_dir + '/final.chpt')
            except ValueError:
                tf.logging.error('fail to save model at %s',
                                 FLAGS.checkpoint_dir + '/final.chpt')

    else:
        raise ValueError('illegal action: %s' % FLAGS.action)
Example 6
def lagrangian_optimizer_kld(train_set, additive_slack, learning_rate,
                             learning_rate_constraint, loops):
    """Implements surrogate-based Lagrangian optimizer (Algorithm 2).

  Specifically solves:
    min_{theta} sum_{G = 0, 1} KLD(p, pprG(theta))
      s.t. error_rate <= additive_slack,
    where p is the overall proportion of positives and pprG is the positive
    prediction rate for group G.

  We frame this as a constrained optimization problem:
    min_{theta, xi_pos0, xi_pos1, xi_neg0, xi_neg1} {
      -p log(xi_pos0) - (1-p) log(xi_neg0) - p log(xi_pos1)
        -(1-p) log(xi_neg1)}
    s.t.
      error_rate <= additive_slack,
        xi_pos0 <= ppr0(theta), xi_neg0 <= npr0(theta),
        xi_pos1 <= ppr1(theta), xi_neg1 <= npr1(theta),
  and formulate the Lagrangian:
    max_{lambda's >= 0} min_{xi's} {
      -p log(xi_pos0) - (1-p) log(xi_neg0) - p log(xi_pos1)
        -(1-p) log(xi_neg1)
       + lambda_pos0 (xi_pos0 - ppr0(theta))
       + lambda_neg0 (xi_neg0 - npr0(theta))
       + lambda_pos1 (xi_pos1 - ppr1(theta))
       + lambda_neg1 (xi_neg1 - npr1(theta))}
    s.t.
      error_rate <= additive_slack.

  We do best response for the slack variables xi:
    BR for xi_pos0 = p / lambda_pos0
    BR for xi_neg0 = (1 - p) / lambda_neg0
    BR for xi_pos1 = p / lambda_pos1
    BR for xi_neg1 = (1 - p) / lambda_neg1
  We do gradient ascent on the lambda's, where
    Gradient w.r.t. lambda_pos0
      = BR for xi_pos0 - ppr0(theta)
      = p / lambda_pos0 - ppr0(theta)
      = Gradient w.r.t. lambda_pos0 of
        (p log(lambda_pos0) - lambda_pos0 ppr0(theta))
    Gradient w.r.t. lambda_neg0
      = Gradient w.r.t. lambda_neg0 of
        ((1 - p) log(lambda_neg0) - lambda_neg0 npr0(theta))
    Gradient w.r.t. lambda_pos1
      = Gradient w.r.t. lambda_pos1 of
        (p log(lambda_pos1) - lambda_pos1 ppr1(theta))
    Gradient w.r.t. lambda_neg1
      = Gradient w.r.t. lambda_neg1 of
        ((1 - p) log(lambda_neg1) - lambda_neg1 npr1(theta)).
  We do gradient descent on the theta's, with ppr's and npr's replaced with hinge
  surrogates. We use concave lower bounds on ppr's and npr's, so that when they
  get negated in the updates, we get convex upper bounds.

  See Appendix D.1 in the paper for more details.

  Args:
    train_set: (features, labels, groups)
    additive_slack: float, additive slack on error rate constraint
    learning_rate: float, learning rate for model parameters
    learning_rate_constraint: float, learning rate for Lagrange multipliers
    loops: int, number of iterations

  Returns:
    stochastic_model containing list of models and probabilities,
    deterministic_model.
  """
    x_train, y_train, z_train = train_set
    dimension = x_train.shape[-1]

    tf.reset_default_graph()

    # Data tensors.
    features_tensor = tf.constant(x_train.astype("float32"), name="features")
    labels_tensor = tf.constant(y_train.astype("float32"), name="labels")

    # Linear model.
    weights = tf.Variable(tf.zeros(dimension, dtype=tf.float32),
                          name="weights")
    threshold = tf.Variable(0, name="threshold", dtype=tf.float32)
    predictions_tensor = (tf.tensordot(features_tensor, weights, axes=(1, 0)) +
                          threshold)

    # Group-specific predictions.
    predictions_group0 = tf.boolean_mask(predictions_tensor,
                                         mask=(z_train < 1))
    num_examples0 = np.sum(z_train < 1)
    predictions_group1 = tf.boolean_mask(predictions_tensor,
                                         mask=(z_train > 0))
    num_examples1 = np.sum(z_train > 0)

    # We use the TF Constrained Optimization (TFCO) library to set up the
    # constrained optimization problem. The library doesn't currently support best
    # responses for slack variables. So we maintain explicit Lagrange multipliers
    # for the slack variables, and let the library deal with the Lagrange
    # multipliers for the error rate constraint.

    # Since we need to perform a gradient descent update on the model parameters,
    # and an ascent update on the Lagrange multipliers on the slack variables, we
    # create a single "minimization" objective using stop gradients, where a
    # descent gradient update has the effect of minimizing over the model
    # parameters and maximizing over the Lagrange multipliers for the slack
    # variables. As noted above, the ascent update on the Lagrange multipliers for
    # the error rate constraint is done by the library internally.

    # Placeholders for Lagrange multipliers for the four slack variables.
    lambda_pos0 = tf.Variable(0.5, dtype=tf.float32, name="lambda_pos0")
    lambda_neg0 = tf.Variable(0.5, dtype=tf.float32, name="lambda_neg0")
    lambda_pos1 = tf.Variable(0.5, dtype=tf.float32, name="lambda_pos1")
    lambda_neg1 = tf.Variable(0.5, dtype=tf.float32, name="lambda_neg1")
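    # Gradient ascent on these multipliers realizes the best responses derived
    # in the docstring, e.g. the ascent direction for lambda_pos0 is
    # p / lambda_pos0 - ppr0(theta).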

    # Set up prediction rates and surrogate relaxations on them.
    p = np.mean(y_train)  # Proportion of positives.

    # Positive and negative prediction rates for group 0 and group 1.
    ppr_group0 = tf.reduce_sum(
        tf.cast(
            tf.greater(predictions_group0,
                       tf.zeros(num_examples0, dtype="float32")),
            "float32")) / num_examples0
    npr_group0 = 1 - ppr_group0
    ppr_group1 = tf.reduce_sum(
        tf.cast(
            tf.greater(predictions_group1,
                       tf.zeros(num_examples1, dtype="float32")),
            "float32")) / num_examples1
    npr_group1 = 1 - ppr_group1

    # Hinge concave lower bounds on the positive and negative prediction rates.
    # In the gradient updates, these get negated and become convex upper bounds.
    # For group 0:
    ppr_hinge_group0 = tf.reduce_sum(
        1 - tf.nn.relu(1 - predictions_group0)) * 1.0 / num_examples0
    npr_hinge_group0 = tf.reduce_sum(
        1 - tf.nn.relu(1 + predictions_group0)) * 1.0 / num_examples0
    # For group 1:
    ppr_hinge_group1 = tf.reduce_sum(
        1 - tf.nn.relu(1 - predictions_group1)) * 1.0 / num_examples1
    npr_hinge_group1 = tf.reduce_sum(
        1 - tf.nn.relu(1 + predictions_group1)) * 1.0 / num_examples1

    # Set up KL-divergence objective for constrained optimization.
    # We use stop gradients to ensure that a single descent gradient update on the
    # objective has the effect of minimizing over the model parameters and
    # maximizing over the Lagrange multipliers for the slack variables.

    # KL-divergence for group 0.
    kld_hinge_pos_group0 = (-tf.stop_gradient(lambda_pos0) * ppr_hinge_group0 -
                            p * tf.log(lambda_pos0) +
                            lambda_pos0 * tf.stop_gradient(ppr_group0))
    kld_hinge_neg_group0 = (-tf.stop_gradient(lambda_neg0) * npr_hinge_group0 -
                            (1 - p) * tf.log(lambda_neg0) +
                            lambda_neg0 * tf.stop_gradient(npr_group0))
    kld_hinge_group0 = kld_hinge_pos_group0 + kld_hinge_neg_group0

    # KL-divergence for group 1.
    kld_hinge_pos_group1 = (-tf.stop_gradient(lambda_pos1) * ppr_hinge_group1 -
                            p * tf.log(lambda_pos1) +
                            lambda_pos1 * tf.stop_gradient(ppr_group1))
    kld_hinge_neg_group1 = (-tf.stop_gradient(lambda_neg1) * npr_hinge_group1 -
                            (1 - p) * tf.log(lambda_neg1) +
                            lambda_neg1 * tf.stop_gradient(npr_group1))
    kld_hinge_group1 = kld_hinge_pos_group1 + kld_hinge_neg_group1

    # Wrap the objective into a rate object.
    objective = tfco.wrap_rate(kld_hinge_group0 + kld_hinge_group1)

    # Set up error rate constraint for constrained optimization.
    context = tfco.rate_context(predictions_tensor, labels_tensor)
    error = tfco.error_rate(context)
    constraints = [error <= additive_slack]

    # Create rate minimization problem object.
    problem = tfco.RateMinimizationProblem(objective, constraints)

    # Set up optimizer.
    optimizer = tfco.LagrangianOptimizerV1(
        tf.train.AdamOptimizer(learning_rate=learning_rate),
        constraint_optimizer=tf.train.AdamOptimizer(
            learning_rate=learning_rate_constraint))
    train_op = optimizer.minimize(problem)

    # Start TF session and initialize variables.
    session = tf.Session()
    session.run(tf.global_variables_initializer())

    # We maintain a list of objectives and model weights during training.
    objectives = []
    violations = []
    models = []

    # Perform full gradient updates.
    for ii in range(loops):

        # Gradient updates.
        session.run(train_op)

        # Checkpoint once every 10 iterations.
        if ii % 10 == 0:
            # Model weights.
            model = [session.run(weights), session.run(threshold)]
            models.append(model)

            # Objective.
            klds = evaluation.expected_group_klds(x_train, y_train, z_train,
                                                  [model], [1.0])
            objectives.append(sum(klds))

            # Violation.
            error = evaluation.expected_error_rate(x_train, y_train, [model],
                                                   [1.0])
            violations.append([error - additive_slack])

    # Use the recorded objectives and constraints to find the best iterate.
    best_iterate = tfco.find_best_candidate_index(np.array(objectives),
                                                  np.array(violations))
    deterministic_model = models[best_iterate]

    # Use shrinking to find a sparse distribution over iterates.
    probabilities = tfco.find_best_candidate_distribution(
        np.array(objectives), np.array(violations))
    models_pruned = [
        models[i] for i in range(len(models)) if probabilities[i] > 0.0
    ]
    probabilities_pruned = probabilities[probabilities > 0.0]

    return (models_pruned, probabilities_pruned), deterministic_model
Example 7
def evaluate(test_csvs, create_model):
    if FLAGS.scorer_path:
        scorer = Scorer(FLAGS.lm_alpha, FLAGS.lm_beta,
                        FLAGS.scorer_path, Config.alphabet)
    else:
        scorer = None

    test_sets = [create_dataset([csv], batch_size=FLAGS.test_batch_size, train_phase=False) for csv in test_csvs]
    iterator = tfv1.data.Iterator.from_structure(tfv1.data.get_output_types(test_sets[0]),
                                                 tfv1.data.get_output_shapes(test_sets[0]),
                                                 output_classes=tfv1.data.get_output_classes(test_sets[0]))
    test_init_ops = [iterator.make_initializer(test_set) for test_set in test_sets]
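    # A single reinitializable iterator is shared by all test sets; running
    # one of the init ops below switches which CSV feeds the graph.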

    batch_wav_filename, (batch_x, batch_x_len), batch_y = iterator.get_next()

    # One rate per layer
    no_dropout = [None] * 6
    logits, _ = create_model(batch_x=batch_x,
                             batch_size=FLAGS.test_batch_size,
                             seq_length=batch_x_len,
                             dropout=no_dropout)

    # Transpose to batch major and apply softmax for decoder
    transposed = tf.nn.softmax(tf.transpose(a=logits, perm=[1, 0, 2]))

    loss = tfv1.nn.ctc_loss(labels=batch_y,
                            inputs=logits,
                            sequence_length=batch_x_len)

    tfv1.train.get_or_create_global_step()

    # Get number of accessible CPU cores for this process
    try:
        num_processes = cpu_count()
    except NotImplementedError:
        num_processes = 1

    with tfv1.Session(config=Config.session_config) as session:
        load_graph_for_evaluation(session)

        def run_test(init_op, dataset):
            wav_filenames = []
            losses = []
            predictions = []
            ground_truths = []

            bar = create_progressbar(prefix='Test epoch | ',
                                     widgets=['Steps: ', progressbar.Counter(), ' | ', progressbar.Timer()]).start()
            log_progress('Test epoch...')

            step_count = 0

            # Initialize iterator to the appropriate dataset
            session.run(init_op)

            # First pass, compute losses and transposed logits for decoding
            while True:
                try:
                    batch_wav_filenames, batch_logits, batch_loss, batch_lengths, batch_transcripts = \
                        session.run([batch_wav_filename, transposed, loss, batch_x_len, batch_y])
                except tf.errors.OutOfRangeError:
                    break

                decoded = ctc_beam_search_decoder_batch(batch_logits, batch_lengths, Config.alphabet, FLAGS.beam_width,
                                                        num_processes=num_processes, scorer=scorer,
                                                        cutoff_prob=FLAGS.cutoff_prob, cutoff_top_n=FLAGS.cutoff_top_n)
                predictions.extend(d[0][1] for d in decoded)
                ground_truths.extend(sparse_tensor_value_to_texts(batch_transcripts, Config.alphabet))
                wav_filenames.extend(wav_filename.decode('UTF-8') for wav_filename in batch_wav_filenames)
                losses.extend(batch_loss)

                step_count += 1
                bar.update(step_count)

            bar.finish()

            # Print test summary
            test_samples = calculate_and_print_report(wav_filenames, ground_truths, predictions, losses, dataset)
            return test_samples

        samples = []
        for csv, init_op in zip(test_csvs, test_init_ops):
            print('Testing model on {}'.format(csv))
            samples.extend(run_test(init_op, dataset=csv))
        return samples
Example 8
    def benchmark_model(self,
                        warmup_runs,
                        bm_runs,
                        num_threads,
                        trace_filename=None):
        """Benchmark model."""
        if self.tensorrt:
            print('Using tensorrt ', self.tensorrt)
            self.build_and_save_model()
            graphdef = self.freeze_model()

        if num_threads > 0:
            print('num_threads for benchmarking: {}'.format(num_threads))
            sess_config = tf.ConfigProto(
                intra_op_parallelism_threads=num_threads,
                inter_op_parallelism_threads=1)
        else:
            sess_config = tf.ConfigProto()

        # Disable dependency optimization
        # (2 == rewriter_config_pb2.RewriterConfig.OFF).
        sess_config.graph_options.rewrite_options.dependency_optimization = 2
        if self.use_xla:
            sess_config.graph_options.optimizer_options.global_jit_level = (
                tf.OptimizerOptions.ON_2)

        with tf.Graph().as_default(), tf.Session(config=sess_config) as sess:
            inputs = tf.placeholder(tf.float32,
                                    name='input',
                                    shape=self.inputs_shape)
            output = self.build_model(inputs, is_training=False)

            img = np.random.uniform(size=self.inputs_shape)

            sess.run(tf.global_variables_initializer())
            if self.tensorrt:
                fetches = [inputs.name] + [i.name for i in output]
                goutput = self.convert_tr(graphdef, fetches)
                inputs, output = goutput[0], goutput[1:]

            if not self.use_xla:
                # Don't use tf.group because XLA removes the whole graph for tf.group.
                output = tf.group(*output)
            for i in range(warmup_runs):
                start_time = time.time()
                sess.run(output, feed_dict={inputs: img})
                print('Warm up: {} {:.4f}s'.format(i,
                                                   time.time() - start_time))
            print('Start benchmark runs total={}'.format(bm_runs))
            timev = []
            for i in range(bm_runs):
                if trace_filename and i == (bm_runs // 2):
                    run_options = tf.RunOptions()
                    run_options.trace_level = tf.RunOptions.FULL_TRACE
                    run_metadata = tf.RunMetadata()
                    sess.run(output,
                             feed_dict={inputs: img},
                             options=run_options,
                             run_metadata=run_metadata)
                    logging.info('Dumping trace to %s', trace_filename)
                    trace_dir = os.path.dirname(trace_filename)
                    if not tf.io.gfile.exists(trace_dir):
                        tf.io.gfile.makedirs(trace_dir)
                    with tf.io.gfile.GFile(trace_filename, 'w') as trace_file:
                        from tensorflow.python.client import timeline  # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
                        trace = timeline.Timeline(
                            step_stats=run_metadata.step_stats)
                        trace_file.write(
                            trace.generate_chrome_trace_format(
                                show_memory=True))

                start_time = time.time()
                sess.run(output, feed_dict={inputs: img})
                timev.append(time.time() - start_time)

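            # Discard the two fastest and two slowest runs before computing
            # summary statistics.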
            timev.sort()
            timev = timev[2:bm_runs - 2]
            print(
                '{} {}runs {}threads: mean {:.4f} std {:.4f} min {:.4f} max {:.4f}'
                .format(self.model_name, len(timev), num_threads,
                        np.mean(timev), np.std(timev), np.min(timev),
                        np.max(timev)))
Example 9
def main(_):
    # Print parameters.
    pp.pprint(tf.app.flags.FLAGS.flag_values_dict())
    # Output folders.
    if FLAGS.dataset == 'uniform':
        if FLAGS.architecture == 'fc':
            FLAGS.sample_dir = 'samples fc/' + 'dataset_' + FLAGS.dataset + '_num_samples_' + str(FLAGS.num_samples) +\
            '_num_neurons_' + str(FLAGS.num_neurons) + '_num_bins_' + str(FLAGS.num_bins)\
            + '_ref_period_' + str(FLAGS.ref_period) + '_firing_rate_' + str(FLAGS.firing_rate) + '_correlation_' + str(FLAGS.correlation) +\
            '_group_size_' + str(FLAGS.group_size)  + '_critic_iters_' + str(FLAGS.critic_iters) + '_lambda_' + str(FLAGS.lambd) +\
             '_num_units_' + str(FLAGS.num_units) +\
            '_iteration_' + FLAGS.iteration + '/'
        elif FLAGS.architecture == 'conv':
            FLAGS.sample_dir = 'samples conv/' + 'dataset_' + FLAGS.dataset + '_num_samples_' + str(FLAGS.num_samples) +\
            '_num_neurons_' + str(FLAGS.num_neurons) + '_num_bins_' + str(FLAGS.num_bins)\
            + '_ref_period_' + str(FLAGS.ref_period) + '_firing_rate_' + str(FLAGS.firing_rate) + '_correlation_' + str(FLAGS.correlation) +\
            '_group_size_' + str(FLAGS.group_size)  + '_critic_iters_' + str(FLAGS.critic_iters) + '_lambda_' + str(FLAGS.lambd) +\
            '_num_layers_' + str(FLAGS.num_layers)  + '_num_features_' + str(FLAGS.num_features) + '_kernel_' + str(FLAGS.kernel_width) +\
            '_iteration_' + FLAGS.iteration + '/'
    elif FLAGS.dataset == 'packets' and FLAGS.number_of_modes == 1:
        if FLAGS.architecture == 'fc':
            FLAGS.sample_dir = 'samples fc/' + 'dataset_' + FLAGS.dataset + '_num_samples_' + str(FLAGS.num_samples) +\
            '_num_neurons_' + str(FLAGS.num_neurons) + '_num_bins_' + str(FLAGS.num_bins) + '_packet_prob_' + str(FLAGS.packet_prob)\
            + '_firing_rate_' + str(FLAGS.firing_rate) + '_group_size_' + str(FLAGS.group_size) + '_critic_iters_' +\
            str(FLAGS.critic_iters) + '_lambda_' + str(FLAGS.lambd) + '_num_units_' + str(FLAGS.num_units) +\
            '_iteration_' + FLAGS.iteration + '/'
        elif FLAGS.architecture == 'conv':
            FLAGS.sample_dir = 'samples conv/' + 'dataset_' + FLAGS.dataset + '_num_samples_' + str(FLAGS.num_samples) +\
            '_num_neurons_' + str(FLAGS.num_neurons) + '_num_bins_' + str(FLAGS.num_bins) + '_packet_prob_' + str(FLAGS.packet_prob)\
            + '_firing_rate_' + str(FLAGS.firing_rate) + '_group_size_' + str(FLAGS.group_size) + '_critic_iters_' +\
            str(FLAGS.critic_iters) + '_lambda_' + str(FLAGS.lambd) +\
            '_num_layers_' + str(FLAGS.num_layers)  + '_num_features_' + str(FLAGS.num_features) + '_kernel_' + str(FLAGS.kernel_width) +\
            '_iteration_' + FLAGS.iteration + '/'
    elif FLAGS.dataset == 'packets' and FLAGS.number_of_modes == 2:
        if FLAGS.architecture == 'fc':
            FLAGS.sample_dir = 'samples fc/' + 'dataset_' + FLAGS.dataset + '_num_samples_' + str(FLAGS.num_samples) +\
            '_num_neurons_' + str(FLAGS.num_neurons) + '_num_bins_' + str(FLAGS.num_bins) + '_packet_prob_' + str(FLAGS.packet_prob)\
            + '_firing_rate_' + str(FLAGS.firing_rate) + '_group_size_' + str(FLAGS.group_size) + '_noise_in_packet_' + str(FLAGS.noise_in_packet) + '_number_of_modes_' + str(FLAGS.number_of_modes)  + '_critic_iters_' +\
            str(FLAGS.critic_iters) + '_lambda_' + str(FLAGS.lambd) + '_num_units_' + str(FLAGS.num_units) +\
            '_iteration_' + FLAGS.iteration + '/'
        elif FLAGS.architecture == 'conv':
            FLAGS.sample_dir = 'samples conv/' + 'dataset_' + FLAGS.dataset + '_num_samples_' + str(FLAGS.num_samples) +\
            '_num_neurons_' + str(FLAGS.num_neurons) + '_num_bins_' + str(FLAGS.num_bins) + '_packet_prob_' + str(FLAGS.packet_prob)\
            + '_firing_rate_' + str(FLAGS.firing_rate) + '_group_size_' + str(FLAGS.group_size) + '_noise_in_packet_' + str(FLAGS.noise_in_packet) + '_number_of_modes_' + str(FLAGS.number_of_modes)  + '_critic_iters_' +\
            str(FLAGS.critic_iters) + '_lambda_' + str(FLAGS.lambd) +\
            '_num_layers_' + str(FLAGS.num_layers)  + '_num_features_' + str(FLAGS.num_features) + '_kernel_' + str(FLAGS.kernel_width) +\
            '_iteration_' + FLAGS.iteration + '/'
    elif FLAGS.dataset == 'retina':
        if FLAGS.architecture == 'fc':
            FLAGS.sample_dir = 'samples fc/' + 'dataset_' + FLAGS.dataset  +\
            '_num_neurons_' + str(FLAGS.num_neurons) + '_num_bins_' + str(FLAGS.num_bins)\
            + '_critic_iters_' + str(FLAGS.critic_iters) + '_lambda_' + str(FLAGS.lambd) +\
            '_num_units_' + str(FLAGS.num_units) +\
            '_iteration_' + FLAGS.iteration + '/'
        elif FLAGS.architecture == 'conv':
            FLAGS.sample_dir = 'samples conv/' + 'dataset_' + FLAGS.dataset + '_num_samples_' + str(FLAGS.num_samples) +\
                               '_num_neurons_' + str(FLAGS.num_neurons) + '_num_bins_' + str(FLAGS.num_bins)\
                               + '_critic_iters_' + str(FLAGS.critic_iters) + '_lambda_' + str(FLAGS.lambd) +\
                               '_num_layers_' + str(FLAGS.num_layers)  + '_num_features_' + str(FLAGS.num_features) + '_kernel_' + str(FLAGS.kernel_width) +\
                               '_iteration_' + FLAGS.iteration + '/'
    elif FLAGS.dataset == 'maxent':
        if FLAGS.architecture == 'fc':
            FLAGS.sample_dir =  'samples fc/' + 'dataset_' + FLAGS.dataset + '_num_samples_' + str(FLAGS.num_samples) +\
                                '_num_neurons_' + str(FLAGS.num_neurons) + '_num_bins_' + str(FLAGS.num_bins)\
                                + '_critic_iters_' + str(FLAGS.critic_iters) + '_lambda_' + str(FLAGS.lambd) +\
                                '_num_layers_' + str(FLAGS.num_layers)  + '_num_units_' + str(FLAGS.num_units) +\
                                '_iteration_' + FLAGS.iteration + '/'

    FLAGS.checkpoint_dir = FLAGS.sample_dir + 'checkpoint/'
    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    if FLAGS.recovery_dir == "" and os.path.exists(FLAGS.sample_dir +
                                                   '/stats_real.npz'):
        FLAGS.recovery_dir = FLAGS.sample_dir

    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True

    with tf.Session(config=run_config) as sess:
        wgan = WGAN_conv(sess,
                         architecture=FLAGS.architecture,
                         num_neurons=FLAGS.num_neurons,
                         num_bins=FLAGS.num_bins,
                         num_layers=FLAGS.num_layers,
                         num_units=FLAGS.num_units,
                         num_features=FLAGS.num_features,
                         kernel_width=FLAGS.kernel_width,
                         lambd=FLAGS.lambd,
                         batch_size=FLAGS.batch_size,
                         checkpoint_dir=FLAGS.checkpoint_dir,
                         sample_dir=FLAGS.sample_dir,
                         num_samples_for_diagnostics=FLAGS.num_samples)

        if FLAGS.is_train:
            training_samples, dev_samples = data_provider.generate_spike_trains(
                FLAGS, FLAGS.recovery_dir)
            wgan.training_samples = training_samples
            wgan.dev_samples = dev_samples
            print('data loaded')
            wgan.train(FLAGS)
        else:
            if not wgan.load(FLAGS.training_stage):
                raise Exception("[!] Train a model first, then run test mode")

        #LOAD TRAINING DATASET (and its statistics)
        original_dataset = np.load(FLAGS.sample_dir + '/stats_real.npz')

        #PLOT FILTERS
        if FLAGS.dataset in ['retina', 'maxent']:
            index = np.arange(FLAGS.num_neurons)
        else:
            index = np.argsort(original_dataset['shuffled_index'])

        if FLAGS.architecture == 'conv':
            print('get filters -----------------------------------')
            filters = wgan.get_filters(num_samples=64)
            visualize_filters_and_units.plot_filters(filters, sess, FLAGS,
                                                     index)

        #GET GENERATED SAMPLES AND COMPUTE THEIR STATISTICS
        print('compute stats -----------------------------------')
        if 'samples' not in original_dataset:
            if FLAGS.dataset == 'retina':
                real_samples = retinal_data.get_samples(
                    num_bins=FLAGS.num_bins,
                    num_neurons=FLAGS.num_neurons,
                    instance=FLAGS.data_instance,
                    folder=os.getcwd() + '/data/retinal data/')
            elif FLAGS.dataset == 'maxent':
                real_samples = maxent_data.get_samples()

        else:
            real_samples = original_dataset['samples']
        sim_pop_activity.plot_samples(real_samples, FLAGS.num_neurons,
                                      FLAGS.sample_dir, 'real')
        fake_samples = wgan.get_samples(num_samples=FLAGS.num_samples)
        fake_samples = fake_samples.eval(session=sess)
        sim_pop_activity.plot_samples(fake_samples.T, FLAGS.num_neurons,
                                      FLAGS.sample_dir, 'fake')
        _, _, _, _, _ = analysis.get_stats(X=fake_samples.T,
                                           num_neurons=FLAGS.num_neurons,
                                           num_bins=FLAGS.num_bins,
                                           folder=FLAGS.sample_dir,
                                           name='fake',
                                           instance=FLAGS.data_instance)

        #EVALUATE HIGH ORDER FEATURES (only when dimensionality is low)
        if FLAGS.dataset == 'uniform' and FLAGS.num_bins * FLAGS.num_neurons < 40:
            print(
                'compute high order statistics-----------------------------------'
            )
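            # Draw fake samples in chunks (2**8 runs of 2**13 samples each) so
            # that no single sess.run call has to materialize the full matrix.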
            num_trials = int(2**8)
            num_samples_per_trial = 2**13
            fake_samples_mat = np.zeros((num_trials * num_samples_per_trial,
                                         FLAGS.num_neurons * FLAGS.num_bins))
            for ind_tr in range(num_trials):
                fake_samples = wgan.sess.run([wgan.ex_samples])[0]
                fake_samples_mat[ind_tr * num_samples_per_trial:(ind_tr + 1) *
                                 num_samples_per_trial, :] = fake_samples

            analysis.evaluate_approx_distribution(X=fake_samples_mat.T, folder=FLAGS.sample_dir, num_samples_theoretical_distr=2**21,num_bins=FLAGS.num_bins, num_neurons=FLAGS.num_neurons,\
                              group_size=FLAGS.group_size,refr_per=FLAGS.ref_period)

        #COMPARISON WITH K-PAIRWISE AND DG MODELS (only for retinal data)
        if FLAGS.dataset == 'retina':
            print(
                'nearest sample analysis -----------------------------------')
            num_samples = 100  #this is for the 'nearest sample' analysis (Fig. S5)
            print('real_samples')
            analysis.nearest_sample(X_real=real_samples,
                                    fake_samples=real_samples,
                                    num_neurons=FLAGS.num_neurons,
                                    num_bins=FLAGS.num_bins,
                                    folder=FLAGS.sample_dir,
                                    name='real',
                                    num_samples=num_samples)
            ###################
            print('fake_samples')
            analysis.nearest_sample(X_real=real_samples,
                                    fake_samples=fake_samples.T,
                                    num_neurons=FLAGS.num_neurons,
                                    num_bins=FLAGS.num_bins,
                                    folder=FLAGS.sample_dir,
                                    name='spikeGAN',
                                    num_samples=num_samples)
            ###################
            k_pairwise_samples = retinal_data.load_samples_from_k_pairwise_model(
                num_samples=FLAGS.num_samples,
                num_bins=FLAGS.num_bins,
                num_neurons=FLAGS.num_neurons,
                instance=FLAGS.data_instance,
                folder=os.getcwd() + '/data/retinal data/')
            print('k_pairwise_samples')
            _, _, _, _, _ = analysis.get_stats(X=k_pairwise_samples,
                                               num_neurons=FLAGS.num_neurons,
                                               num_bins=FLAGS.num_bins,
                                               folder=FLAGS.sample_dir,
                                               name='k_pairwise',
                                               instance=FLAGS.data_instance)
            analysis.nearest_sample(X_real=real_samples,
                                    fake_samples=k_pairwise_samples,
                                    num_neurons=FLAGS.num_neurons,
                                    num_bins=FLAGS.num_bins,
                                    folder=FLAGS.sample_dir,
                                    name='k_pairwise',
                                    num_samples=num_samples)
            ###################
            DDG_samples = retinal_data.load_samples_from_DDG_model(
                num_samples=FLAGS.num_samples,
                num_bins=FLAGS.num_bins,
                num_neurons=FLAGS.num_neurons,
                instance=FLAGS.data_instance,
                folder=os.getcwd() + '/data/retinal data/')
            print('DDG_samples')
            _, _, _, _, _ = analysis.get_stats(X=DDG_samples,
                                               num_neurons=FLAGS.num_neurons,
                                               num_bins=FLAGS.num_bins,
                                               folder=FLAGS.sample_dir,
                                               name='DDG',
                                               instance=FLAGS.data_instance)
            analysis.nearest_sample(X_real=real_samples,
                                    fake_samples=DDG_samples,
                                    num_neurons=FLAGS.num_neurons,
                                    num_bins=FLAGS.num_bins,
                                    folder=FLAGS.sample_dir,
                                    name='DDG',
                                    num_samples=num_samples)
def get_batches(X, y, batch_size=64):
    # Mini-batch generator. The def line is missing from the snippet, so the
    # signature (and the batch_size default) is reconstructed from its use
    # in the training loop below.
    length = len(X)
    n_batches = (length + batch_size - 1) // batch_size
    for i in range(n_batches):
        yield (X[batch_size * i:min(length, batch_size * (i + 1))],
               y[batch_size * i:min(length, batch_size * (i + 1))])
        
EPOCHS = 40
max_acc = 0
save_model_path = 'Traffic_sign_classifier'
logits = model(inputs)
logits = tf.identity(logits, name='logits')
cost = tf.reduce_mean(tf.losses.softmax_cross_entropy(labels, logits),
                      name='cost')

optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)

correct_pred = tf.equal(tf.argmax(labels, 1), tf.argmax(logits, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')

saver = tf.train.Saver(max_to_keep=1)  # create the saver once, not per epoch

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(EPOCHS):
        for i, (X, y) in enumerate(get_batches(X_train, y_train)):
            sess.run(optimizer, feed_dict={inputs: X, labels: y})
            if i % 50 == 0:
                valid_acc = sess.run(accuracy,
                                     feed_dict={inputs: X_valid,
                                                labels: y_valid})
                train_acc = sess.run(accuracy, feed_dict={inputs: X, labels: y})
        print('epoch : ', epoch + 1, ' training accuracy is : ', train_acc,
              ' valid accuracy is :', valid_acc)
        # Keep only the checkpoint with the best validation accuracy.
        if valid_acc > max_acc:
            max_acc = valid_acc
            save_path = saver.save(sess, save_model_path)

import pandas as pd
loaded_graph = tf.Graph()
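The snippet breaks off right after creating loaded_graph, presumably on the way to restoring the saved classifier for testing. A minimal sketch of that step, assuming the usual import_meta_graph pattern; the tensor names ('inputs:0', 'labels:0', 'accuracy:0') and the X_test/y_test arrays are assumptions, since the snippet never shows them.

with tf.Session(graph=loaded_graph) as sess:
    # Recreate the graph from the .meta file and restore the weights.
    loader = tf.train.import_meta_graph(save_model_path + '.meta')
    loader.restore(sess, save_model_path)
    # Tensor names are assumed; adjust to the real placeholder names.
    loaded_inputs = loaded_graph.get_tensor_by_name('inputs:0')
    loaded_labels = loaded_graph.get_tensor_by_name('labels:0')
    loaded_acc = loaded_graph.get_tensor_by_name('accuracy:0')
    test_acc = sess.run(loaded_acc, feed_dict={loaded_inputs: X_test,
                                               loaded_labels: y_test})
    print('test accuracy:', test_acc)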
Example n. 11
def main(_):
    tf.logging.set_verbosity(tf.logging.INFO)

    processors = {
        "sst-2": run_classifier.SST2Processor,
        "mnli": run_classifier.MnliProcessor
    }

    tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
                                                  FLAGS.init_checkpoint1)
    tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
                                                  FLAGS.init_checkpoint2)

    bert_config1 = modeling.BertConfig.from_json_file(FLAGS.bert_config_file1)
    bert_config2 = modeling.BertConfig.from_json_file(FLAGS.bert_config_file2)

    if FLAGS.max_seq_length > bert_config1.max_position_embeddings:
        raise ValueError(
            "Cannot use sequence length %d because the BERT model "
            "was only trained up to sequence length %d" %
            (FLAGS.max_seq_length, bert_config1.max_position_embeddings))

    task_name = FLAGS.task_name.lower()

    if task_name not in processors:
        raise ValueError("Task not found: %s" % (task_name, ))

    processor = processors[task_name]()

    input_ids = tf.placeholder(dtype=tf.int32,
                               shape=(None, FLAGS.max_seq_length))
    input_mask = tf.placeholder(dtype=tf.int32,
                                shape=(None, FLAGS.max_seq_length))
    segment_ids = tf.placeholder(dtype=tf.int32,
                                 shape=(None, FLAGS.max_seq_length))
    label_ids = tf.placeholder(dtype=tf.int32, shape=(None, ))
    num_labels = len(processor.get_labels())

    with tf.variable_scope("model1"):
        run_classifier.create_model(bert_config1,
                                    False,
                                    input_ids,
                                    input_mask,
                                    segment_ids,
                                    label_ids,
                                    num_labels,
                                    use_one_hot_embeddings=False)
    vars1 = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="model1")

    with tf.variable_scope("model2"):
        run_classifier.create_model(bert_config2,
                                    False,
                                    input_ids,
                                    input_mask,
                                    segment_ids,
                                    label_ids,
                                    num_labels,
                                    use_one_hot_embeddings=False)
    vars2 = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="model2")

    tf.train.init_from_checkpoint(FLAGS.init_checkpoint1, {
        "%s" % v.name[v.name.index("/") + 1:].split(":")[0]: v
        for v in vars1
    })

    tf.train.init_from_checkpoint(FLAGS.init_checkpoint2, {
        "%s" % v.name[v.name.index("/") + 1:].split(":")[0]: v
        for v in vars2
    })

    def abs_diff(var_name):
        with tf.variable_scope("model1", reuse=True):
            var1 = tf.get_variable(var_name)

        with tf.variable_scope("model2", reuse=True):
            var2 = tf.get_variable(var_name)

        return tf.math.abs(tf.math.subtract(var1, var2))

    def sq_diff(var_name):
        with tf.variable_scope("model1", reuse=True):
            var1 = tf.get_variable(var_name)

        with tf.variable_scope("model2", reuse=True):
            var2 = tf.get_variable(var_name)

        return tf.math.subtract(var1, var2) * tf.math.subtract(var1, var2)

    total_diff = 0.0
    total_params = 0

    bert_diff = 0.0
    bert_params = 0

    classifier_diff = 0.0
    classifier_params = 0

    for var in vars1:
        if FLAGS.diff_type == "euclidean":
            var_diff = tf.reduce_sum(
                sq_diff(var.name[var.name.index("/") + 1:var.name.index(":")]))
        else:
            var_diff = tf.reduce_sum(
                abs_diff(var.name[var.name.index("/") +
                                  1:var.name.index(":")]))

        var_params = 1
        shape = var.get_shape()
        for dim in shape:
            var_params *= dim

        total_diff += var_diff
        total_params += var_params

        # Accumulate BERT-encoder and classifier-head parameters separately.
        if "bert" in var.name:
            bert_diff += var_diff
            bert_params += var_params
        else:
            classifier_diff += var_diff
            classifier_params += var_params

    if FLAGS.diff_type == "euclidean":
        total_diff = tf.sqrt(total_diff)
        bert_diff = tf.sqrt(bert_diff)
        classifier_diff = tf.sqrt(classifier_diff)
    else:
        total_diff = total_diff / tf.cast(total_params, tf.float32)
        bert_diff = bert_diff / tf.cast(bert_params, tf.float32)
        classifier_diff = classifier_diff / tf.cast(classifier_params,
                                                    tf.float32)

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    tf.logging.info("average diff in all params = %.8f", sess.run(total_diff))
    tf.logging.info("average diff in bert params = %.8f", sess.run(bert_diff))
    tf.logging.info("average diff in classifier params = %.8f",
                    sess.run(classifier_diff))

    return
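For intuition, the same checkpoint-distance computation is easy to reproduce outside the TF graph. A minimal NumPy sketch, assuming the two checkpoints are given as {variable name: array} dicts:

import numpy as np

def checkpoint_diff(vars1, vars2, diff_type="euclidean"):
    """Distance between two checkpoints given as {name: ndarray} dicts."""
    total_diff, total_params = 0.0, 0
    for name, w1 in vars1.items():
        w2 = vars2[name]
        if diff_type == "euclidean":
            total_diff += np.sum((w1 - w2) ** 2)
        else:
            total_diff += np.sum(np.abs(w1 - w2))
        total_params += w1.size
    if diff_type == "euclidean":
        return np.sqrt(total_diff)      # L2 distance over all parameters
    return total_diff / total_params    # mean absolute difference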
Example n. 12
def train(cycle_gan_network, max_img, trainA, trainB, lr_rate, shape,
          pool_size, model_dir, images_dir):
    saver = tf.train.Saver(max_to_keep=None)
    lenA = len(trainA)
    lenB = len(trainB)
    epoch = 0
    summ_count = 0
    wint_count = 0
    num_imgs = 0
    poolA = np.zeros((pool_size, 1, shape[0], shape[1], shape[2]))
    poolB = np.zeros((pool_size, 1, shape[0], shape[1], shape[2]))

    with tf.Session() as session:
        session.run(tf.global_variables_initializer())
        #saver.restore(session,model_dir+"try_60\\")
        while epoch < 201:
            if epoch >= 100:
                lr_rate = 0.0002 - ((epoch - 100) * 0.0002) / 100

            for step in range(max_img):

                if summ_count >= lenA:
                    summ_count = 0
                    random.shuffle(trainA)

                if wint_count >= lenB:
                    wint_count = 0
                    random.shuffle(trainB)

                summer_image = Fetch_New_Image(trainA[summ_count])
                winter_image = Fetch_New_Image(trainB[wint_count])

                summ_count = summ_count + 1
                wint_count = wint_count + 1

                summer_image = np.reshape(summer_image,
                                          (1, shape[0], shape[1], shape[2]))
                winter_image = np.reshape(winter_image,
                                          (1, shape[0], shape[1], shape[2]))

                _, genB, genA_loss, _, genA, genB_loss, cyclicA, cyclicB = session.run(
                    [
                        cycle_gan_network.genA_opt, cycle_gan_network.gen_B,
                        cycle_gan_network.gen_loss_A,
                        cycle_gan_network.genB_opt, cycle_gan_network.gen_A,
                        cycle_gan_network.gen_loss_B,
                        cycle_gan_network.cyclicA, cycle_gan_network.cyclicB
                    ],
                    feed_dict={
                        cycle_gan_network.input_A: summer_image,
                        cycle_gan_network.input_B: winter_image,
                        cycle_gan_network.lr_rate: lr_rate
                    })

                poolA, poolB, num_imgs = save_to_pool(poolA, poolB, genA, genB,
                                                      pool_size, num_imgs)

                indA = random.randint(0, (min(pool_size, num_imgs) - 1))
                indB = random.randint(0, (min(pool_size, num_imgs) - 1))
                fakeA_img = poolA[indA]
                fakeB_img = poolB[indB]

                _, discA_loss, _, discB_loss = session.run(
                    [
                        cycle_gan_network.discA_opt,
                        cycle_gan_network.disc_loss_A,
                        cycle_gan_network.discB_opt,
                        cycle_gan_network.disc_loss_B
                    ],
                    feed_dict={
                        cycle_gan_network.input_A: summer_image,
                        cycle_gan_network.input_B: winter_image,
                        cycle_gan_network.lr_rate: lr_rate,
                        cycle_gan_network.fake_pool_Aimg: fakeA_img,
                        cycle_gan_network.fake_pool_Bimg: fakeB_img
                    })

                if step % 50 == 0:
                    print(
                        "epoch = %r step = %r discA_loss = %r genA_loss = %r discB_loss = %r genB_loss = %r"
                        % (epoch, step, discA_loss, genA_loss, discB_loss,
                           genB_loss))

                if step % 150 == 0:
                    images = [genA, cyclicB, genB, cyclicA]
                    img_ind = 0
                    for img in images:
                        img = np.reshape(img, (shape[0], shape[1], shape[2]))
                        if img.max() != img.min():
                            img = (((img - img.min()) * 255) /
                                   (img.max() - img.min())).astype(np.uint8)
                        else:
                            img = ((img - img.min()) * 255).astype(np.uint8)
                        # cmax assumed to be 255.0 (value elided in the snippet)
                        scipy.misc.toimage(
                            img, cmin=0.0,
                            cmax=255.0).save(images_dir + "/img_" +
                                             str(img_ind) + "_" + str(epoch) +
                                             "_" + str(step) + ".jpg")
                        img_ind = img_ind + 1

                print("step = %r" % (step))

            if epoch % 50 == 0:
                saver.save(session,
                           model_dir + "/try_" + str(epoch) + "/",
                           write_meta_graph=True)

            epoch = epoch + 1
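save_to_pool is called above but not shown. A plausible sketch under the usual CycleGAN image-pool convention (fill the pool first, then overwrite a random slot, so the discriminators see a mix of old and new fakes); the actual implementation may differ:

import random

def save_to_pool(poolA, poolB, genA, genB, pool_size, num_imgs):
    if num_imgs < pool_size:
        # Pool not full yet: append the new fakes.
        poolA[num_imgs] = genA
        poolB[num_imgs] = genB
        num_imgs += 1
    else:
        # Pool full: overwrite a random slot with the new fakes.
        poolA[random.randint(0, pool_size - 1)] = genA
        poolB[random.randint(0, pool_size - 1)] = genB
    return poolA, poolB, num_imgs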
Example n. 13
# Changed to the 1.0-style API
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

# 2.0 version
# import tensorflow as tf

hello = tf.constant("hello")

sess = tf.Session()

print(sess.run(hello))
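For comparison, the 2.0-style equivalent runs eagerly and needs no session (a sketch, assuming TensorFlow 2.x is installed):

import tensorflow as tf  # 2.x, eager execution by default

hello = tf.constant("hello")
print(hello.numpy())  # b'hello'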

Example n. 14
def test_decompress(args):
    """Decompresses an image."""

    # Read the shape information and compressed string from the binary file.
    string = tf.placeholder(tf.string, [1])
    side_string = tf.placeholder(tf.string, [1])
    x_shape = tf.placeholder(tf.int32, [2])
    y_shape = tf.placeholder(tf.int32, [2])
    z_shape = tf.placeholder(tf.int32, [2])
    with open(args.input_file, "rb") as f:
        packed = tfc.PackedTensors(f.read())
    tensors = [string, side_string, x_shape, y_shape, z_shape]
    arrays = packed.unpack(tensors)

    # Instantiate model.
    synthesis_transform = SynthesisTransform(args.num_filters)
    hyper_synthesis_transform = HyperSynthesisTransform(args.num_filters)
    entropy_bottleneck = tfc.EntropyBottleneck(dtype=tf.float32)

    # Decompress and transform the image back.
    z_shape = tf.concat([z_shape, [args.num_filters]], axis=0)
    z_hat = entropy_bottleneck.decompress(side_string,
                                          z_shape,
                                          channels=args.num_filters)
    sigma = hyper_synthesis_transform(z_hat)
    sigma = sigma[:, :y_shape[0], :y_shape[1], :]
    scale_table = np.exp(
        np.linspace(np.log(SCALES_MIN), np.log(SCALES_MAX), SCALES_LEVELS))
    conditional_bottleneck = tfc.GaussianConditional(sigma,
                                                     scale_table,
                                                     dtype=tf.float32)
    y_hat_all = conditional_bottleneck.decompress(string)

    x = read_png("kodak/kodim01.png")
    x = tf.expand_dims(x, 0)
    x.set_shape([1, None, None, 3])
    x_shape = tf.shape(x)
    x *= 255

    active = 192
    y_hat = y_hat_all[:, :, :, :active]
    x_hat = synthesis_transform(y_hat)
    x_hat = tf.clip_by_value(x_hat, 0, 1)
    x_hat = tf.round(x_hat * 255)
    mse = tf.reduce_mean(tf.squared_difference(x, x_hat))
    psnr = tf.squeeze(tf.image.psnr(x_hat, x, 255))
    msssim = tf.squeeze(tf.image.ssim_multiscale(x_hat, x, 255))

    #x_hat = x_hat[0, :x_shape[0], :x_shape[1], :]
    #op = write_png(args.output_file, x_hat)

    sess = tf.Session()
    latest = tf.train.latest_checkpoint(checkpoint_dir=args.checkpoint_dir)
    tf.train.Saver().restore(sess, save_path=latest)
    #sess.run(op, feed_dict=dict(zip(tensors, arrays)))

    #vmse, vpsnr, vmsssim = sess.run([mse, psnr, msssim], feed_dict=dict(zip(tensors, arrays)))
    #print(vmse, vpsnr, vmsssim)

    for active in range(192, 0, -8):
        y_hat = y_hat_all[:, :, :, :active]
        x_hat = synthesis_transform(y_hat)
        x_hat = tf.clip_by_value(x_hat, 0, 1)
        x_hat = tf.round(x_hat * 255)
        mse = tf.reduce_mean(tf.squared_difference(x, x_hat))
        psnr = tf.squeeze(tf.image.psnr(x_hat, x, 255))
        msssim = tf.squeeze(tf.image.ssim_multiscale(x_hat, x, 255))
        vmse, vpsnr, vmsssim = sess.run([mse, psnr, msssim],
                                        feed_dict=dict(zip(tensors, arrays)))
        print(active, vmse, vpsnr, vmsssim)
Example n. 15
def eval_once(saver, summary_writer, labels, loss1, logits1, loss2, logits2,
              loss3, logits3, loss4, logits4, loss5, logits5, loss6, logits6,
              summary_op):
    """Run Eval once.

  Args:
    saver: Saver.
    summary_writer: Summary writer.
    labels: Ground-truth labels tensor.
    loss1..loss6: Per-subject loss ops.
    logits1..logits6: Per-subject logits ops.
    summary_op: Summary op.
  """
    with tf.Session() as sess:
        ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            # Restores from checkpoint
            saver.restore(sess, ckpt.model_checkpoint_path)
            # Assuming model_checkpoint_path looks something like:
            #   /my-favorite-path/cifar10_train/model.ckpt-0,
            # extract global_step from it.
            global_step = ckpt.model_checkpoint_path.split('/')[-1].split(
                '-')[-1]
            print('~~~~~~~~~~~checkpoint file found at step %s' % global_step)
        else:
            print('No checkpoint file found')
            return

        # Start the queue runners.
        coord = tf.train.Coordinator()
        try:
            threads = []
            for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
                threads.extend(
                    qr.create_threads(sess,
                                      coord=coord,
                                      daemon=True,
                                      start=True))

            num_iter = int(math.ceil(FLAGS.num_examples / batch_size))
            cnt = 0
            step = 0
            accuracy = 0
            accuracies = np.zeros(num * 2)
            cnts = np.zeros(num * 2)
            steps = np.zeros(num * 2)
            comsample_labels = np.zeros(2 * batch_size)
            simpleness = np.zeros((num, 2 * batch_size))
            concur_s = np.zeros((num, num))
            while step < num_iter and not coord.should_stop():
                #print('!!!!!!the step', step)

                # Local test (subjects 1..num) followed by the "test on 7"
                # pass over the same six heads. Two consecutive steps share a
                # head, so the branch index is int(step / 2) modulo num.
                branches = [(logits1, loss1), (logits2, loss2),
                            (logits3, loss3), (logits4, loss4),
                            (logits5, loss5), (logits6, loss6)]
                logits_op, loss_op = branches[int(step / 2) % num]
                samplelabels, predictions, precision = sess.run(
                    [labels, logits_op, loss_op])

                #print('!!!!!!the index of t/????????????????/he whole batch %f /n' % output3[0][0][0])
                """
        if step == 5:         
          ndar = np.array(output)
          np.savetxt("testout.csv", ndar.reshape(128,256), delimiter=",")
          ndar = np.array(output2)
          np.savetxt("testlabel.csv", ndar.reshape(128,256), delimiter=",")
          ndar = np.array(output3)
          np.savetxt("testsignal.csv", ndar.reshape(128,256), delimiter=",")
        """
                #print(samplelabels.shape)
                #print(predictions.shape)
                n_acc = 0
                l = 0.0
                for i in range(0, batch_size):
                    #print('label:',samplelabels.shape)
                    #print('prediction:',predictions.shape)
                    if int(samplelabels[i][0][0]) == np.argmax(predictions[i]):
                        n_acc = n_acc + 1

                    i_loss = -math.log(predictions[i][int(
                        samplelabels[i][0][0])])
                    if int(step / 2) >= num:
                        simpleness[int(step / 2) -
                                   num][int(step % 2) * batch_size +
                                        i] = i_loss
                        comsample_labels[int(step % 2) * batch_size +
                                         i] = samplelabels[i][0][0]
                        '''
                if l<precision:
                    simpleness[int(step/2)][int(step%2)*batch_size+i]=1
                '''
                    l += i_loss
                accuracies[int(step / 2)] += 100.0 * n_acc / (2 * batch_size)
                cnts[int(step / 2)] += l / (2 * batch_size)
                steps[int(step / 2)] += 1
                step += 1
            #compute the simpleness matrix
            #print('::::::::::::comsample_labels: ')
            #print(comsample_labels)
            w_loss = [0.17, 0.17, 0.14, 0.22, 0.14, 0.16]
            for i in range(0, num):
                for j in range(0, 2 * batch_size):
                    # error margin
                    simpleness[i][j] = 1 - simpleness[i][j]
                    '''
          if simpleness[i][j]<m_loss[i]:
            simpleness[i][j]=1
          else:
            simpleness[i][j]=0
          '''
            #compute concurrent_simpleness
            for i in range(0, num):
                for j in range(0, num):
                    if i != j:
                        #concurrent simpleness between user_i and user_j
                        for n in range(0, 2 * batch_size):
                            concur_s[i][j] += (
                                (simpleness[i][n] - simpleness[j][n]) * 100.0
                            ) * ((simpleness[i][n] - simpleness[j][n]) * 100.0)
                        concur_s[i][j] = math.sqrt(
                            concur_s[i][j]) / (2 * batch_size)
                        #difference[i][j]=1-concur_s[i][j]
            print('concurrent simpleness: ')
            print(concur_s)

            i = 0
            while i < num:
                print(
                    '!!!!!!!!!!!!!!!!!!!! subject %s (%s records): test loss = %.3f, accuracy=%.3f'
                    % (i + 1, steps[i], cnts[i], accuracies[i]))
                i += 1

            print(
                '(locally test)!!!!!!!!!!!!!!!!!!!! average_test loss = %.3f, average_accuracy=%.3f'
                % (np.mean(cnts[0:num]), np.mean(accuracies[0:num])))

            print(
                '(test on 7)!!!!!!!!!!!!!!!!!!!! average_test loss = %.3f, average_accuracy=%.3f'
                %
                (np.mean(cnts[num:2 * num]), np.mean(accuracies[num:2 * num])))

            summary = tf.Summary()
            summary.ParseFromString(sess.run(summary_op))
            summary.value.add(tag='loss @ 1', simple_value=precision)
            summary_writer.add_summary(summary, global_step)
        except Exception as e:  # pylint: disable=broad-except
            coord.request_stop(e)

        coord.request_stop()
        coord.join(threads, stop_grace_period_secs=10)
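The nested loops that fill concur_s compute, for every pair of subjects, a scaled root-mean-square distance between their per-sample simpleness vectors. An equivalent vectorized NumPy sketch:

import numpy as np

def concurrent_simpleness(simpleness, batch_size):
    # simpleness: (num, 2 * batch_size) array of per-sample scores.
    diff = (simpleness[:, None, :] - simpleness[None, :, :]) * 100.0
    concur = np.sqrt(np.sum(diff ** 2, axis=-1)) / (2 * batch_size)
    np.fill_diagonal(concur, 0.0)  # the i == j case is skipped above
    return concur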
Example n. 16
def classify_apps(apk_folder_directory):
    '''
    Classify apps as benign or malicious apps.

    Parameters
    ----------
    apk_folder_directory : the directory of the folder containing APKs for scanning

    Returns
    -------
    prediction_dictionary : a dictionary recording classification results (keys for APK names and values for corresponding results - 0
        represents a benign app, while 1 represents malware)
    '''

    data_X, data_Y, package_name_list = load_data(apk_folder_directory)

    learning_rate = 0.00001
    batch_size = 1
    display_step = 1
    dimension_count = 86645
    input_size = 50
    class_count = 2

    tf.disable_eager_execution()
    X = tf.placeholder(tf.float32, [None, input_size, dimension_count, 1])
    Y = tf.placeholder(tf.float32, [None, class_count])

    prediction = conv_net(X)  # build the net
    init_op = tf.global_variables_initializer()  # initialise variables to their default values
    saver = tf.train.Saver()

    with tf.Session() as session:
        session.run(init_op)

        saver.restore(session, get_cnn_trainer_saver_path())

        batch_count = int(data_X.shape[0] / batch_size)
        prediction_dictionary = {}

        for step in range(batch_count):
            batch_x = data_X[step * batch_size:(step + 1) * batch_size]
            batch_y = data_Y[step * batch_size:(step + 1) * batch_size]
            batch_package_name_list = package_name_list[step *
                                                        batch_size:(step + 1) *
                                                        batch_size]

            batch_x = decompress(batch_x, dimension_count)
            batch_y = get_one_hot_vector(batch_size, batch_y)

            batch_result_list = session.run(tf.argmax(prediction, 1),
                                            feed_dict={X: batch_x})

            package_name_index = 0

            for start_index in range(0, len(batch_result_list), input_size):
                prediction_dictionary[
                    batch_package_name_list[package_name_index]] = np.int16(
                        batch_result_list[start_index:start_index +
                                          input_size].max()
                    ).item(
                    )  # convert the "numpy.int64" type to the native "int" type
                package_name_index += 1

        return prediction_dictionary
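A usage sketch (the folder path is a placeholder):

predictions = classify_apps('/path/to/apk_folder')
for package_name, label in predictions.items():
    print(package_name, 'malware' if label == 1 else 'benign')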
Example n. 17
def fit_linear_regression(trainX,
                          trainY,
                          learning_rate=0.01,
                          ridge_alpha=0.0,
                          lasso_alpha=0.0,
                          max_iter=1000,
                          display_step=50,
                          converged_tol=1e-8,
                          to_print=False):
    # disable eager execution
    tf.disable_eager_execution()

    # check dimensions
    assert trainX.shape[0] == trainY.shape[0]  # same number of training rows
    nbtrain = trainX.shape[0]
    nbfeatures = trainX.shape[1]

    # placeholder
    X = tf.placeholder(tf.float32, shape=(None, nbfeatures), name='X')
    Y = tf.placeholder(tf.float32, shape=(None, 1), name='Y')

    # fitting parameters
    theta = tf.Variable(np.random.uniform(size=(nbfeatures, 1)),
                        name='theta',
                        dtype='float')
    b = tf.Variable(np.random.uniform(), name='b', dtype='float')

    # fitting function
    pred_Y = tf.matmul(X, theta) + b

    # cost function
    cost = tf.reduce_mean(tf.square(pred_Y - Y))
    # regularization
    if ridge_alpha is not None and ridge_alpha != 0:
        cost += 0.5 * ridge_alpha * (tf.reduce_sum(tf.square(theta)) +
                                     tf.square(b))
    if lasso_alpha is not None and lasso_alpha != 0:
        cost += lasso_alpha * (tf.reduce_sum(tf.abs(theta)) + tf.abs(b))

    # training the machine
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
    initializer = tf.global_variables_initializer()

    sess = tf.Session()
    sess.run(initializer)

    old_cost = sess.run(cost, feed_dict={X: trainX, Y: trainY})

    # Fit all training data
    for epoch in range(max_iter):
        sess.run(optimizer, feed_dict={X: trainX, Y: trainY})

        if to_print:
            # Display logs per epoch step
            if (epoch + 1) % display_step == 0:
                c = sess.run(cost, feed_dict={X: trainX, Y: trainY})
                print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(c), \
                      "theta=", sess.run(theta), "b=", sess.run(b))

        if converged_tol is not None:
            new_cost = sess.run(cost, feed_dict={X: trainX, Y: trainY})
            if abs(new_cost - old_cost) < converged_tol:
                break
            else:
                old_cost = new_cost

    if to_print:
        print("Optimization Finished!")

    # extract value
    training_cost = sess.run(cost, feed_dict={X: trainX, Y: trainY})
    trained_theta = sess.run(theta)
    trained_b = sess.run(b)

    fitted_params = {
        'theta': trained_theta,
        'b': trained_b,
        'cost': training_cost,
        'nbepoch': epoch,
        'nbfeatures': nbfeatures,
        'nbtrain': nbtrain
    }
    tf_sess = {'session': sess, 'inputs': X, 'outputs': pred_Y}

    return fitted_params, tf_sess
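A usage sketch on synthetic data (the target weights 3, -2 and bias 1 are arbitrary illustration values):

import numpy as np

rng = np.random.RandomState(0)
trainX = rng.rand(200, 2).astype(np.float32)
trainY = (3 * trainX[:, :1] - 2 * trainX[:, 1:] + 1).astype(np.float32)

params, tf_sess = fit_linear_regression(trainX, trainY, learning_rate=0.1,
                                        max_iter=2000)
print(params['theta'].ravel(), params['b'], params['cost'])
tf_sess['session'].close()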
Example n. 18
image_list = image_list[:, :224, :224, :]
print("truncating image sizes to 224x224")

# which_network = "celeba"
which_network = "imagenet"
print("using", which_network, "weigths")
if which_network == "imagenet":
    pb_file = "1.pb"
elif which_network == "celeba":
    pb_file = "200.pb"
else:
    assert False

# Reading network: parse the frozen GraphDef, import it into a fresh graph,
# and open a session on that graph.
with open(pb_file, "rb") as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

with tf.Graph().as_default() as graph:
    tf.import_graph_def(graph_def, name='')
sess = tf.Session(graph=graph)

# Getting the list of convolutional tensors
tensors = list_tensors(sess.graph)

image_counter = 0
mx = np.random.rand(0, 0)
batch_size = 128
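list_tensors is called above but not defined in the snippet. A plausible sketch that collects the outputs of convolutional ops from a graph (a guess at the helper, not its actual implementation):

def list_tensors(graph):
    """Return the output tensors of all Conv2D ops in the graph."""
    return [op.outputs[0] for op in graph.get_operations()
            if op.type == 'Conv2D']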
Example n. 19
def main(unused_argv):
    """Main function for running experiments."""
    # Load data
    (train_adj, full_adj, train_feats, test_feats, y_train, y_val, y_test,
     train_mask, val_mask, test_mask, _, val_data, test_data, num_data,
     visible_data) = load_data(FLAGS.data_prefix, FLAGS.dataset, FLAGS.precalc)

    # Partition graph and do preprocessing
    if FLAGS.bsize > 1:
        _, parts = partition_utils.partition_graph(train_adj, visible_data,
                                                   FLAGS.num_clusters)
        parts = [np.array(pt) for pt in parts]
    else:
        (parts, features_batches, support_batches, y_train_batches,
         train_mask_batches) = utils.preprocess(train_adj, train_feats,
                                                y_train, train_mask,
                                                visible_data,
                                                FLAGS.num_clusters,
                                                FLAGS.diag_lambda)

    (_, val_features_batches, val_support_batches, y_val_batches,
     val_mask_batches) = utils.preprocess(full_adj, test_feats, y_val,
                                          val_mask, np.arange(num_data),
                                          FLAGS.num_clusters_val,
                                          FLAGS.diag_lambda)

    (_, test_features_batches, test_support_batches, y_test_batches,
     test_mask_batches) = utils.preprocess(full_adj, test_feats, y_test,
                                           test_mask, np.arange(num_data),
                                           FLAGS.num_clusters_test,
                                           FLAGS.diag_lambda)
    idx_parts = list(range(len(parts)))

    # Some preprocessing
    model_func = models.GCN

    # Define placeholders
    placeholders = {
        'support': tf.sparse_placeholder(tf.float32),
        'features': tf.placeholder(tf.float32),
        'labels': tf.placeholder(tf.float32, shape=(None, y_train.shape[1])),
        'labels_mask': tf.placeholder(tf.int32),
        'dropout': tf.placeholder_with_default(0., shape=()),
        'num_features_nonzero':
        tf.placeholder(tf.int32)  # helper variable for sparse dropout
    }

    # Create model
    model = model_func(placeholders,
                       input_dim=test_feats.shape[1],
                       logging=True,
                       multilabel=FLAGS.multilabel,
                       norm=FLAGS.layernorm,
                       precalc=FLAGS.precalc,
                       num_layers=FLAGS.num_layers)

    # Initialize session
    sess = tf.Session()
    tf.set_random_seed(seed)

    # Init variables
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    cost_val = []
    total_training_time = 0.0
    # Train model
    for epoch in range(FLAGS.epochs):
        t = time.time()
        np.random.shuffle(idx_parts)
        if FLAGS.bsize > 1:
            (features_batches, support_batches, y_train_batches,
             train_mask_batches) = utils.preprocess_multicluster(
                 train_adj, parts, train_feats, y_train, train_mask,
                 FLAGS.num_clusters, FLAGS.bsize, FLAGS.diag_lambda)
            for pid in range(len(features_batches)):
                # Use preprocessed batch data
                features_b = features_batches[pid]
                support_b = support_batches[pid]
                y_train_b = y_train_batches[pid]
                train_mask_b = train_mask_batches[pid]
                # Construct feed dictionary
                feed_dict = utils.construct_feed_dict(features_b, support_b,
                                                      y_train_b, train_mask_b,
                                                      placeholders)
                feed_dict.update({placeholders['dropout']: FLAGS.dropout})
                # Training step
                outs = sess.run([model.opt_op, model.loss, model.accuracy],
                                feed_dict=feed_dict)
        else:
            np.random.shuffle(idx_parts)
            for pid in idx_parts:
                # Use preprocessed batch data
                features_b = features_batches[pid]
                support_b = support_batches[pid]
                y_train_b = y_train_batches[pid]
                train_mask_b = train_mask_batches[pid]
                # Construct feed dictionary
                feed_dict = utils.construct_feed_dict(features_b, support_b,
                                                      y_train_b, train_mask_b,
                                                      placeholders)
                feed_dict.update({placeholders['dropout']: FLAGS.dropout})
                # Training step
                outs = sess.run([model.opt_op, model.loss, model.accuracy],
                                feed_dict=feed_dict)

        total_training_time += time.time() - t
        print_str = 'Epoch: %04d ' % (
            epoch + 1) + 'training time: {:.5f} '.format(
                total_training_time) + 'train_acc= {:.5f} '.format(outs[2])

        # Validation
        if FLAGS.validation:
            cost, acc, micro, macro = evaluate(sess, model,
                                               val_features_batches,
                                               val_support_batches,
                                               y_val_batches, val_mask_batches,
                                               val_data, placeholders)
            cost_val.append(cost)
            print_str += 'val_acc= {:.5f} '.format(
                acc) + 'mi F1= {:.5f} ma F1= {:.5f} '.format(micro, macro)

        tf.logging.info(print_str)

        if epoch > FLAGS.early_stopping and cost_val[-1] > np.mean(
                cost_val[-(FLAGS.early_stopping + 1):-1]):
            tf.logging.info('Early stopping...')
            break

    tf.logging.info('Optimization Finished!')

    # Save model
    saver.save(sess, FLAGS.save_name)

    # Load model (using CPU for inference)
    with tf.device('/cpu:0'):
        sess_cpu = tf.Session(config=tf.ConfigProto(device_count={'GPU': 0}))
        sess_cpu.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        saver.restore(sess_cpu, FLAGS.save_name)
        # Testing
        test_cost, test_acc, micro, macro = evaluate(
            sess_cpu, model, test_features_batches, test_support_batches,
            y_test_batches, test_mask_batches, test_data, placeholders)
        print_str = 'Test set results: ' + 'cost= {:.5f} '.format(
            test_cost) + 'accuracy= {:.5f} '.format(
                test_acc) + 'mi F1= {:.5f} ma F1= {:.5f}'.format(micro, macro)
        tf.logging.info(print_str)
Example n. 20
                        GLOBAL_RUNNING_R.append(ep_r)
                    else:
                        GLOBAL_RUNNING_R.append(0.99 * GLOBAL_RUNNING_R[-1] +
                                                0.01 * ep_r)
                    print(
                        self.name,
                        "Ep:",
                        GLOBAL_EP,
                        "| Ep_r: %i" % GLOBAL_RUNNING_R[-1],
                    )
                    GLOBAL_EP += 1
                    break


if __name__ == "__main__":
    SESS = tf.Session()

    with tf.device("/cpu:0"):
        OPT_A = tf.train.RMSPropOptimizer(LR_A, name='RMSPropA')
        OPT_C = tf.train.RMSPropOptimizer(LR_C, name='RMSPropC')
        GLOBAL_AC = ACNet(GLOBAL_NET_SCOPE)  # we only need its params
        workers = []
        # Create worker
        for i in range(N_WORKERS):
            i_name = 'W_%i' % i  # worker name
            workers.append(Worker(i_name, GLOBAL_AC))

    COORD = tf.train.Coordinator()
    SESS.run(tf.global_variables_initializer())

    if OUTPUT_GRAPH:
Example n. 21
    def inference(self, image_path_pattern: Text, output_dir: Text, **kwargs):
        """Read and preprocess input images.

    Args:
      image_path_pattern: Image file pattern such as /tmp/img*.jpg
      output_dir: the directory for output images. Output images will be named
        as 0.jpg, 1.jpg, ....
      **kwargs: extra parameters for visualization, such as
        min_score_thresh, max_boxes_to_draw, and line_thickness.

    Returns:
      Annotated image.
    """
        params = copy.deepcopy(self.params)
        with tf.Session() as sess:
            # Build inputs and preprocessing.
            raw_images, images, scales = build_inputs(image_path_pattern,
                                                      params['image_size'])

            # Build model.
            class_outputs, box_outputs = build_model(self.model_name, images,
                                                     **self.params)
            restore_ckpt(sess,
                         self.ckpt_path,
                         enable_ema=self.enable_ema,
                         export_ckpt=None)
            # for postprocessing.
            params.update(
                dict(batch_size=len(raw_images),
                     disable_pyfun=self.disable_pyfun))

            # Build postprocessing.
            detections_batch = det_post_process(
                params,
                class_outputs,
                box_outputs,
                scales,
                min_score_thresh=kwargs.get('min_score_thresh', 0.2),
                max_boxes_to_draw=kwargs.get('max_boxes_to_draw', 50))
            outputs_np = sess.run(detections_batch)
            # Visualize results.
            for i, output_np in enumerate(outputs_np):
                # output_np has format [image_id, y, x, height, width, score, class]
                boxes = output_np[:, 1:5]
                classes = output_np[:, 6].astype(int)
                scores = output_np[:, 5]

                # This is not needed if disable_pyfun=True
                # convert [x, y, width, height] to [ymin, xmin, ymax, xmax]
                # TODO(tanmingxing): make this conversion more efficient.
                if not self.disable_pyfun:
                    boxes[:, [0, 1, 2, 3]] = boxes[:, [1, 0, 3, 2]]

                boxes[:, 2:4] += boxes[:, 0:2]
                img = visualize_image(raw_images[i], boxes, classes, scores,
                                      self.label_id_mapping, **kwargs)
                output_image_path = os.path.join(output_dir, str(i) + '.jpg')
                Image.fromarray(img).save(output_image_path)
                logging.info('writing file to %s', output_image_path)

            return outputs_np
Example n. 22
    def run(self):
        # Save the log file
        log = open(os.path.join(self.directory, 'log.txt'), 'w')
        log.write("Epochs: {}\n".format(self.epochs))
        log.write("Learning Rate: {}\n".format(self.learning_rate))
        log.write("L1 Hidden Nodes: {}\n".format(self.l1_hidden_nodes))
        log.write("L2 Hidden Nodes: {}\n".format(self.l2_hidden_nodes))
        log.write("Batch Size: {}\n".format(self.batch_size))
        log.write("\n")
        X = tf.placeholder(tf.float32, [None, self.n_input])
        Y = tf.placeholder(tf.float32, [None, self.n_classes])

        layer_1 = tf.nn.relu(self.fc(X, self.l1_hidden_nodes))
        layer_2 = tf.nn.relu(self.fc(layer_1, self.l2_hidden_nodes))
        layer_out = self.fc(layer_2, self.n_classes)

        logits = layer_out
        prediction = tf.nn.softmax(logits, name='prediction')

        loss_op = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits,
                                                       labels=Y))
        optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
        train_op = optimizer.minimize(loss_op)

        correct_prediction = tf.equal(tf.argmax(prediction, 1),
                                      tf.argmax(Y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        init_op = tf.global_variables_initializer()

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:
            sess.run(init_op)
            total_batch = int(len(self.training_data) / self.batch_size) + 1
            batch_x = []
            batch_y = []
            for i in range(total_batch):
                batch_x.append(
                    np.array(self.training_data[self.batch_size *
                                                i:self.batch_size * (i + 1)]))
                batch_y.append(
                    np.array(
                        self.training_labels[self.batch_size *
                                             i:self.batch_size * (i + 1)]))
            for epoch in range(self.epochs):
                avg_loss = 0
                avg_acc = 0
                for i in range(total_batch):
                    sess.run([train_op],
                             feed_dict={
                                 X: batch_x[i],
                                 Y: batch_y[i]
                             })
                    if epoch % 100 == 0:
                        loss, acc = sess.run([loss_op, accuracy],
                                             feed_dict={
                                                 X: batch_x[i],
                                                 Y: batch_y[i]
                                             })
                        avg_loss += loss / total_batch
                        avg_acc += acc / total_batch
                if epoch % 100 == 0:
                    message = 'Epoch: {} Avg Batch [loss: {:.4f}  acc: {:.3f}]'.format(
                        epoch, avg_loss, avg_acc)
                    self.feedback.emit('Train', message)
                    log.write(message + "\n")
                self.progress.emit(epoch + 1)
                if self.stop:
                    self.feedback.emit('Train', 'Training interrupted.')
                    log.write('Training interrupted.')
                    break

            pred_train = sess.run(prediction,
                                  feed_dict={X: self.training_data})
            pred_validation = sess.run(prediction,
                                       feed_dict={X: self.validation_data})

            message = "Train Acc: {:.5f}".format(
                sess.run(accuracy,
                         feed_dict={
                             X: self.training_data,
                             Y: self.training_labels
                         }))
            self.feedback.emit('Train', message)
            log.write("\n" + message + "\n")

            data, header = self.confusion_matrix(pred_train,
                                                 self.training_labels)
            output = tabulate(data, headers=header, tablefmt='orgtbl')
            log.write("Training data confusion matrix\n")
            log.write(output + "\n\n")

            message = "Validation Acc: {:.5f}".format(
                sess.run(accuracy,
                         feed_dict={
                             X: self.validation_data,
                             Y: self.validation_labels
                         }))
            self.feedback.emit('Train', message)
            log.write(message + "\n")

            data, header = self.confusion_matrix(pred_validation,
                                                 self.validation_labels)
            output = tabulate(data, headers=header, tablefmt='orgtbl')
            log.write("Training data confusion matrix\n")
            log.write(output + "\n")
            self.feedback.emit('Train', 'See log for confusion matrix')

            # Save the Model
            saver = tf.train.Saver()
            saver.save(sess, os.path.join(self.directory, 'model'))

            # Save the metadata
            file = open(os.path.join(self.directory, 'nenetic-metadata.json'),
                        'w')
            package = {
                'classes': self.classes,
                'colors': self.colors,
                'extractor': self.extractor
            }
            json.dump(package, file)
            file.close()

            # Close log file
            log.close()
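self.confusion_matrix is used above but not shown. A standalone sketch of a helper with the same (data, header) contract expected by tabulate, assuming one-hot predictions and labels:

import numpy as np

def confusion_matrix(predictions, labels):
    # Rows are true classes, columns are predicted classes.
    n_classes = labels.shape[1]
    matrix = np.zeros((n_classes, n_classes), dtype=int)
    for p, t in zip(np.argmax(predictions, 1), np.argmax(labels, 1)):
        matrix[t][p] += 1
    header = ['true/pred'] + [str(c) for c in range(n_classes)]
    data = [[str(r)] + list(row) for r, row in enumerate(matrix)]
    return data, header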
Example n. 23
def neural_network(num_iter=1000):
    tf.disable_v2_behavior()

    # Reset the graph
    tf.reset_default_graph()

    # Setting a seed
    tf.set_random_seed(4155)
    # Construct each possible point pair (x,t) to feed the neural network
    Nx = 10
    Nt = 10
    x = np.linspace(0, 1, Nx)  #from 0 to 1 (sin function)
    t = np.linspace(0, 1, Nt)

    X, T = np.meshgrid(x, t)

    x_ = (X.ravel()).reshape(-1, 1)
    t_ = (T.ravel()).reshape(-1, 1)

    x_tf = tf.convert_to_tensor(x_)
    t_tf = tf.convert_to_tensor(t_)  #converts x and t to tensors

    points = tf.concat([x_tf, t_tf], 1)  # concatenate x and t into (x, t) input pairs

    num_hidden_neurons = [20, 20]
    num_hidden_layers = np.size(num_hidden_neurons)

    with tf.variable_scope('nn'):  #DeepNeuralNetwork
        # Input layer
        previous_layer = points

        # Hidden layers
        for l in range(num_hidden_layers):
            current_layer = tf.layers.dense(previous_layer, \
                                            num_hidden_neurons[l],\
                                            activation=tf.nn.sigmoid)
            previous_layer = current_layer

        # Output layer
        nn_output = tf.layers.dense(previous_layer, 1)
        #Dense implements the operation:
        #output = activation(dot(input, kernel) + bias)

    # Set up the cost function
    def u(x):
        return tf.sin(np.pi * x)  # the initial condition u(x, 0) = sin(pi * x)

    #Trial solution
    with tf.name_scope('cost'):
        trial = (1 - t_tf) * u(x_tf) + x_tf * (1 - x_tf) * t_tf * nn_output

        trial_dt = tf.gradients(trial, t_tf)
        trial_d2x = tf.gradients(tf.gradients(trial, x_tf), x_tf)

        err = tf.square(trial_dt[0] - trial_d2x[0])
        cost = tf.reduce_sum(err, name='cost')

    # Define how the neural network should be trained
    learning_rate = 0.001
    with tf.name_scope('train'):
        optimizer = tf.train.AdamOptimizer(learning_rate)
        training_op = optimizer.minimize(cost)
        # Adam is an optimization algorithm that can be used instead of the
        # classical stochastic gradient descent procedure to update network
        # weights iteratively based on training data. GradientDescentOptimizer
        # would also work.

    # Reference variable to the output from the network
    u_nn = None

    # Define a node that initializes all the nodes within the computational graph
    # for TensorFlow to evaluate
    init = tf.global_variables_initializer()

    with tf.Session() as sess:  # session for running TensorFlow operations
        # Initialize the computational graph
        init.run()

        #print('Initial cost: %g'%cost.eval())

        for i in range(num_iter):
            sess.run(training_op)

        #print('Final cost: %g'%cost.eval())

        u_nn = trial.eval()

    u_e = np.exp(-np.pi**2 * t_) * np.sin(
        np.pi * x_)  #exact/analytical solution

    U_nn = u_nn.reshape(
        (Nt, Nx)).T  # the first and the last values are 0; what does that mean?
    U_e = u_e.reshape((Nt, Nx)).T

    return U_nn, U_e, x
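For reference, the problem being solved, written out to match the trial and exact solutions in the code above (1-D heat equation on the unit square):

\frac{\partial u}{\partial t} = \frac{\partial^2 u}{\partial x^2},
\qquad u(0,t) = u(1,t) = 0, \qquad u(x,0) = \sin(\pi x)

u_{\mathrm{trial}}(x,t) = (1 - t)\sin(\pi x) + x(1 - x)\,t\,N(x,t),
\qquad u_{\mathrm{exact}}(x,t) = e^{-\pi^2 t}\sin(\pi x)

The trial function satisfies the boundary and initial conditions by construction, so the network N(x,t) only has to minimize the squared residual of the PDE itself.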
Example n. 24
def train():
    with tf.Graph().as_default():
        with tf.device('/gpu:'+str(GPU_INDEX)):
            pointclouds_pl, labels_pl, cls_labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT)
            is_training_pl = tf.placeholder(tf.bool, shape=())
            print (is_training_pl)
            
            # Note the global_step=batch parameter to minimize. 
            # That tells the optimizer to helpfully increment the 'batch' parameter for you every time it trains.
            batch = tf.Variable(0)
            bn_decay = get_bn_decay(batch)
            tf.summary.scalar('bn_decay', bn_decay)

            print ("--- Get model and loss")
            # Get model and loss 
            pred, end_points = MODEL.get_model(pointclouds_pl, cls_labels_pl, is_training_pl, bn_decay=bn_decay)
            loss = MODEL.get_loss(pred, labels_pl)
            tf.summary.scalar('loss', loss)

            correct = tf.equal(tf.argmax(pred, 2), tf.to_int64(labels_pl))
            accuracy = tf.reduce_sum(tf.cast(correct, tf.float32)) / float(BATCH_SIZE*NUM_POINT)
            tf.summary.scalar('accuracy', accuracy)

            print ("--- Get training operator")
            # Get training operator
            learning_rate = get_learning_rate(batch)
            tf.summary.scalar('learning_rate', learning_rate)
            if OPTIMIZER == 'momentum':
                optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM)
            elif OPTIMIZER == 'adam':
                optimizer = tf.train.AdamOptimizer(learning_rate)
            train_op = optimizer.minimize(loss, global_step=batch)
            
            # Add ops to save and restore all the variables.
            saver = tf.train.Saver()
        
        # Create a session
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)

        # Add summary writers
        merged = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'train'), sess.graph)
        test_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'test'), sess.graph)

        # Init variables
        init = tf.global_variables_initializer()
        sess.run(init)
        #sess.run(init, {is_training_pl: True})

        ops = {'pointclouds_pl': pointclouds_pl,
               'labels_pl': labels_pl,
               'cls_labels_pl': cls_labels_pl,
               'is_training_pl': is_training_pl,
               'pred': pred,
               'loss': loss,
               'train_op': train_op,
               'merged': merged,
               'step': batch,
               'end_points': end_points}

        best_acc = -1
        for epoch in range(MAX_EPOCH):
            log_string('**** EPOCH %03d ****' % (epoch))
            sys.stdout.flush()
             
            train_one_epoch(sess, ops, train_writer)
            eval_one_epoch(sess, ops, test_writer)

            # Save the variables to disk.
            if epoch % 10 == 0:
                save_path = saver.save(sess, os.path.join(LOG_DIR, "model.ckpt"))
                log_string("Model saved in file: %s" % save_path)
def main(argv):
    del argv  # unused arg
    if not FLAGS.use_gpu:
        raise ValueError('Only GPU is currently supported.')
    if FLAGS.num_cores > 1:
        raise ValueError('Only a single accelerator is currently supported.')
    np.random.seed(FLAGS.seed)
    tf.random.set_seed(FLAGS.seed)
    tf.io.gfile.makedirs(FLAGS.output_dir)
    tf1.disable_v2_behavior()

    session = tf1.Session()
    with session.as_default():
        x_train, y_train, x_test, y_test = utils.load(FLAGS.dataset, session)
        n_train = x_train.shape[0]

        num_classes = int(np.amax(y_train)) + 1
        model = lenet5(n_train, x_train.shape[1:], num_classes)
        for l in model.layers:
            l.kl_cost_weight = l.add_weight(
                name='kl_cost_weight',
                shape=(),
                initializer=tf.constant_initializer(0.),
                trainable=False)
            l.kl_cost_bias = l.add_variable(
                name='kl_cost_bias',
                shape=(),
                initializer=tf.constant_initializer(0.),
                trainable=False)

        [negative_log_likelihood, accuracy, log_likelihood, kl,
         elbo] = get_losses_and_metrics(model, n_train)
        metrics = [elbo, log_likelihood, kl, accuracy]
        tensorboard = tf1.keras.callbacks.TensorBoard(
            log_dir=FLAGS.output_dir,
            update_freq=FLAGS.batch_size * FLAGS.validation_freq)

        def fit_fn(model, steps, initial_epoch):
            return model.fit(
                x=x_train,
                y=y_train,
                batch_size=FLAGS.batch_size,
                epochs=initial_epoch + (FLAGS.batch_size * steps) // n_train,
                initial_epoch=initial_epoch,
                validation_data=(x_test, y_test),
                validation_freq=max(
                    (FLAGS.validation_freq * FLAGS.batch_size) // n_train, 1),
                verbose=1,
                callbacks=[tensorboard])

        model.compile(
            optimizer=tf.keras.optimizers.Adam(lr=float(FLAGS.learning_rate)),
            loss=negative_log_likelihood,
            metrics=metrics)
        session.run(tf1.initialize_all_variables())

        train_epochs = (FLAGS.training_steps * FLAGS.batch_size) // n_train
        fit_fn(model, FLAGS.training_steps, initial_epoch=0)

        labels = tf.keras.layers.Input(shape=y_train.shape[1:])
        ll = tf.keras.backend.function([model.input, labels], [
            model.output.distribution.log_prob(tf.squeeze(labels)),
            model.output.distribution.logits
        ])

        base_metrics = [
            utils.ensemble_metrics(x_train, y_train, model, ll, n_samples=10),
            utils.ensemble_metrics(x_test, y_test, model, ll, n_samples=10)
        ]
        model_dir = os.path.join(FLAGS.output_dir, 'models')
        tf.io.gfile.makedirs(model_dir)
        base_model_filename = os.path.join(model_dir, 'base_model.weights')
        model.save_weights(base_model_filename)

        # Train base model further for comparison.
        fit_fn(model,
               FLAGS.n_auxiliary_variables *
               FLAGS.auxiliary_sampling_frequency * FLAGS.ensemble_size,
               initial_epoch=train_epochs)

        overtrained_metrics = [
            utils.ensemble_metrics(x_train, y_train, model, ll, n_samples=10),
            utils.ensemble_metrics(x_test, y_test, model, ll, n_samples=10)
        ]

        # Perform refined VI.
        sample_op = []
        for l in model.layers:
            if isinstance(
                    l, tfp.layers.DenseLocalReparameterization) or isinstance(
                        l, tfp.layers.Convolution2DFlipout):
                weight_op, weight_cost = sample_auxiliary_op(
                    l.kernel_prior.distribution,
                    l.kernel_posterior.distribution,
                    FLAGS.auxiliary_variance_ratio)
                sample_op.append(weight_op)
                sample_op.append(l.kl_cost_weight.assign_add(weight_cost))
                # Fix the variance of the prior
                session.run(l.kernel_prior.distribution.istrainable.assign(0.))
                if hasattr(l.bias_prior, 'distribution'):
                    bias_op, bias_cost = sample_auxiliary_op(
                        l.bias_prior.distribution,
                        l.bias_posterior.distribution,
                        FLAGS.auxiliary_variance_ratio)
                    sample_op.append(bias_op)
                    sample_op.append(l.kl_cost_bias.assign_add(bias_cost))
                    # Fix the variance of the prior
                    session.run(
                        l.bias_prior.distribution.istrainable.assign(0.))

        ensemble_filenames = []
        for i in range(FLAGS.ensemble_size):
            model.load_weights(base_model_filename)
            for j in range(FLAGS.n_auxiliary_variables):
                session.run(sample_op)
                model.compile(
                    optimizer=tf.keras.optimizers.Adam(
                        # The learning rate is proportional to the scale of the prior.
                        lr=float(FLAGS.learning_rate_for_sampling *
                                 np.sqrt(1. -
                                         FLAGS.auxiliary_variance_ratio)**j)),
                    loss=negative_log_likelihood,
                    metrics=metrics)
                fit_fn(model,
                       FLAGS.auxiliary_sampling_frequency,
                       initial_epoch=train_epochs)
            ensemble_filename = os.path.join(
                model_dir, 'ensemble_component_' + str(i) + '.weights')
            ensemble_filenames.append(ensemble_filename)
            model.save_weights(ensemble_filename)

        auxiliary_metrics = [
            utils.ensemble_metrics(x_train,
                                   y_train,
                                   model,
                                   ll,
                                   weight_files=ensemble_filenames,
                                   n_samples=10),
            utils.ensemble_metrics(x_test,
                                   y_test,
                                   model,
                                   ll,
                                   weight_files=ensemble_filenames,
                                   n_samples=10)
        ]

        for metrics, name in [(base_metrics, 'Base model'),
                              (overtrained_metrics, 'Overtrained model'),
                              (auxiliary_metrics, 'Auxiliary sampling')]:
            logging.info(name)
            for metrics_dict, split in [(metrics[0], 'train'),
                                        (metrics[1], 'test')]:
                logging.info(split)
                for metric_name in metrics_dict:
                    logging.info('%s: %s', metric_name,
                                 metrics_dict[metric_name])
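The refined-VI loop above relies on sample_auxiliary_op, which is defined elsewhere in this codebase. As a rough illustration of the underlying idea, the sketch below shows one Gaussian auxiliary-sampling step in NumPy: the prior N(mu0, s0^2) is split as w = mu0 + a + e, with a carrying a `ratio` fraction of the prior variance; a is sampled from its posterior-implied marginal, and both prior and posterior are conditioned on that sample. This is a hedged sketch of the technique under a mean-field Gaussian assumption, not the actual sample_auxiliary_op implementation; all names are illustrative.

import numpy as np

def sample_auxiliary_step(prior_mean, prior_var, post_mean, post_var, ratio):
    """One auxiliary-sampling step for a factorized Gaussian prior/posterior.

    Decomposes the prior w ~ N(prior_mean, prior_var) as w = prior_mean + a + e
    with a ~ N(0, ratio * prior_var) and e ~ N(0, (1 - ratio) * prior_var).
    """
    # Posterior-implied marginal q(a) = integral of p(a|w) q(w) dw.
    qa_mean = ratio * (post_mean - prior_mean)
    qa_var = ratio ** 2 * post_var + ratio * (1. - ratio) * prior_var
    a = np.random.normal(qa_mean, np.sqrt(qa_var))

    def log_normal(x, mean, var):
        return -0.5 * (np.log(2. * np.pi * var) + (x - mean) ** 2 / var)

    # Single-sample estimate of KL(q(a) || p(a)), accumulated as extra KL cost.
    kl_cost = log_normal(a, qa_mean, qa_var) - log_normal(a, 0., ratio * prior_var)

    # Condition the prior on the sampled a: p(w|a) = N(prior_mean + a, (1 - ratio) * prior_var).
    new_prior_mean = prior_mean + a
    new_prior_var = (1. - ratio) * prior_var

    # Condition the posterior on a via the joint Gaussian of (w, a) under q.
    gain = ratio * post_var / qa_var
    new_post_mean = post_mean + gain * (a - qa_mean)
    new_post_var = post_var * (1. - gain * ratio)
    return new_prior_mean, new_prior_var, new_post_mean, new_post_var, kl_cost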
Esempio n. 26
0
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()  # graph mode, so tf.Session below works on TF2 installs
# Read the image file (tf.read_file takes no mode argument)
image = tf.read_file("lena512.bmp")
# Decode the image file into a Tensor
image_tensor = tf.image.decode_bmp(image)
# Shape of the image tensor
shape = tf.shape(image_tensor)
session = tf.Session()
print("Image shape:")
print(session.run(shape))
# Convert the Tensor to an ndarray
image_ndarray = image_tensor.eval(session=session)
print(image_ndarray)
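For reference, the same pipeline needs no Session in eager TF2; a minimal sketch, assuming lena512.bmp sits in the working directory:

import tensorflow as tf

image_bytes = tf.io.read_file("lena512.bmp")
image_tensor = tf.io.decode_bmp(image_bytes)  # uint8 tensor of shape [H, W, C]
print("Image shape:", image_tensor.shape)
image_ndarray = image_tensor.numpy()          # Tensor -> ndarray, no Session needed
print(image_ndarray)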
Esempio n. 27
0
# --------- Output layer -------------
w_fc3 = weight_variable([100, 10])
b_fc3 = bias_variable([10])
prediction = tf.nn.softmax(tf.matmul(h_fc2, w_fc3) + b_fc3)

# ----------- Loss function (cross-entropy) ----------------
cross_entropy = tf.reduce_mean(
    -tf.reduce_sum(ys * tf.math.log(prediction), axis=[1]))
tf.summary.scalar('Loss', cross_entropy)  # record the loss as a scalar summary; if it decreases, the model is learning
train_step = tf.train.AdamOptimizer(1e-4).minimize(
    cross_entropy)  # minimize cross_entropy with Adam at a learning rate of 1e-4

# ----------- Initialization ---------------
init = tf.global_variables_initializer()  # variable initializer
saver = tf.train.Saver()  # Saver for checkpointing the model
sess = tf.Session()  # create a session
train_writer = tf.summary.FileWriter('logs2/train',
                                     sess.graph)  # loss curve for the training set
test_writer = tf.summary.FileWriter('logs2/test',
                                    sess.graph)  # loss curve for the test set
merged = tf.summary.merge_all()  # merge all TensorBoard summaries
sess.run(init)  # run the initializer

# --------- Train for 500 iterations -------------
for i in range(500):

    batch_x, batch_y = mnist.train.next_batch(100)  # batch of 100 training samples per step
    sess.run(train_step, feed_dict={xs: batch_x, ys: batch_y})  # feed the data

    if i % 100 == 0:  # record summaries every 100 iterations
        train_res = sess.run(merged, feed_dict={xs: batch_x, ys: batch_y})
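        # The original snippet stops here without writing the summaries; the
        # lines below complete the logging (the test feed assumes the standard
        # tutorial MNIST DataSet object used above).
        train_writer.add_summary(train_res, i)
        test_res = sess.run(merged, feed_dict={xs: mnist.test.images,
                                               ys: mnist.test.labels})
        test_writer.add_summary(test_res, i)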
Esempio n. 28
0
def compute_margin(
    input_fn, root_dir, model_config, sess=None,
    batchsize=50, dataset_size=50000):
  """Compute the margins of a model on all input data.

  Loads a model from the given directory and restores the parameters in the
  given scope. Iterates over the entire training dataset and computes an upper
  bound on the margin at each layer via a line search.

  Args:
    input_fn:  function that produces the input and label tensors
    root_dir:  the directory containing the dataset
    model_config: a ModelConfig object that specifies the model
    sess: optional tensorflow session
    batchsize: batch size with which the margin is computed
    dataset_size: number of data points in the dataset

  Returns:
    A dictionary that maps each layer's name to the margins at that layer
    over the entire training set.
  """
  param_path = model_config.get_model_dir_name(root_dir)
  model_fn = model_config.get_model_fn()

  if not sess:
    sess = tf.Session()

  data_format = model_config.data_format
  image_iter, label_iter = input_fn()
  if data_format == 'HWC':
    img_dim = [None, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS]
  else:
    img_dim = [None, IMG_CHANNELS, IMG_HEIGHT, IMG_WIDTH]
  image = tf.placeholder(tf.float32, shape=img_dim, name='image')
  label = tf.placeholder(
      tf.float32, shape=[None, model_config.num_class], name='label')

  loss_layers = ['inputs', 'h1', 'h2', 'h3']
  end_points_collection = {}
  logits = model_fn(image, is_training=False, perturb_points=loss_layers,
                    normalizer_collection=None,
                    end_points_collection=end_points_collection)

  # set up the graph for computing margin
  layer_activations = [end_points_collection[l] for l in loss_layers]
  layer_margins = margin(logits, label, layer_activations)

  # load model parameters
  sess.run(tf.global_variables_initializer())
  model_config.load_parameters(param_path, sess)

  count = 0
  margin_values = []
  while count < dataset_size:
    try:
      count += batchsize
      image_batch, label_batch = sess.run([image_iter, label_iter])
      label_batch = np.reshape(label_batch, [-1, model_config.num_class])
      fd = {image: image_batch, label: label_batch.astype(np.float32)}
      batch_margin = np.squeeze(list(sess.run(layer_margins, feed_dict=fd)))
      margin_values.append(batch_margin)
    except tf.errors.OutOfRangeError:
      print('reached the end of the data')
      break

  margin_values = np.concatenate(margin_values, axis=1)
  margin_values_map = {}
  for ln, lm in zip(loss_layers, margin_values):
    margin_values_map[ln] = lm

  return margin_values_map
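A hypothetical call site for compute_margin might look like the following; input_fn, model_config, and the helper that builds the input pipeline come from the surrounding repo, so every name here is illustrative rather than the actual API:

# Illustrative only: `get_input_fn` and `model_config` are assumed to exist
# in the surrounding codebase.
margins = compute_margin(
    input_fn=get_input_fn,          # returns (image_iter, label_iter) tensors
    root_dir='/tmp/margin_models',
    model_config=model_config,
    batchsize=50,
    dataset_size=50000)
for layer_name, layer_margin in margins.items():
    print(layer_name, np.mean(layer_margin), np.std(layer_margin))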
Esempio n. 29
0
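The snippet below uses X, Y, Y_one_hot, and nb_classes without defining them. A plausible setup, inferred from the [16, nb_classes] weight shape (this matches the common zoo-dataset version of this tutorial; the class count of 7 is an assumption):

import numpy as np
import tensorflow.compat.v1 as tf

nb_classes = 7  # assumed from the usual zoo-dataset version of this example
X = tf.placeholder(tf.float32, [None, 16])
Y = tf.placeholder(tf.int32, [None, 1])
Y_one_hot = tf.reshape(tf.one_hot(Y, nb_classes), [-1, nb_classes])
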
W = tf.Variable(tf.random_normal([16, nb_classes]), name='weight')
b = tf.Variable(tf.random_normal([nb_classes]), name='bias')

logits = tf.matmul(X, W) + b
hypothesis = tf.nn.softmax(logits)

cost_i = tf.nn.softmax_cross_entropy_with_logits(logits=logits,
                                                 labels=Y_one_hot)

cost = tf.reduce_mean(cost_i)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(cost)

prediction = tf.argmax(hypothesis, 1)
correct_prediction = tf.equal(prediction, tf.argmax(Y_one_hot, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    for step in range(2001):
        sess.run(optimizer, feed_dict={X: x_data, Y: y_data})
        if step % 100 == 0:
            loss, acc = sess.run([cost, accuracy],
                                 feed_dict={
                                     X: x_data,
                                     Y: y_data
                                 })
            print("step: {:5}\tLoss: {:.3f}\tAcc: {}".format(step, loss, acc))
    pred = sess.run(prediction, feed_dict={X: x_data})
    for p, y in zip(pred, y_data.flatten()):
        print("[{}] Prediction: {} True Y: {}".format(p == int(y), p, int(y)))
Esempio n. 30
0
import nltk  # assumed available; used by clean_up_sentence below
from nltk.stem.lancaster import LancasterStemmer
stemmer = LancasterStemmer()
from flask import Flask, jsonify, request, render_template
from flask_cors import CORS, cross_origin
from keras.models import load_model

import pickle
import pandas as pd
import numpy as np
import tensorflow.compat.v1 as tf
import json
import random
from tensorflow.python.keras.backend import set_session

sess = tf.Session()
set_session(sess)
# remember to add a data.pkl file here
data = pickle.load(
    open("C:/Users/harip/OneDrive/Desktop/ahad_chatbot/ahad-data.pkl", "rb"))
words = data['words']
classes = data['classes']

with open('trained.json') as json_data:
    intents = json.load(json_data)
lisIndex = []
for indexIntent in intents['intents']:
    lisIndex.append(indexIntent['tag'])


def clean_up_sentence(sentence):
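    # The source is truncated here; the body below is a plausible completion
    # following the usual bag-of-words chatbot pattern (tokenize, lower-case,
    # stem with the LancasterStemmer created above).
    sentence_words = nltk.word_tokenize(sentence)
    sentence_words = [stemmer.stem(word.lower()) for word in sentence_words]
    return sentence_words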